Compare commits
66 Commits
feat/imple
...
recovered-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f37ef2c3a3 | ||
|
|
887d9bffa7 | ||
|
|
ac0c2e3854 | ||
|
|
3628951120 | ||
|
|
9620e032ac | ||
|
|
597a2f7494 | ||
|
|
b8362bbbe7 | ||
|
|
c280f963fe | ||
|
|
880a98e8e2 | ||
|
|
a8538b2e9c | ||
|
|
7c7f205350 | ||
|
|
7f214b76d7 | ||
|
|
684ae52542 | ||
|
|
4ae97f145e | ||
|
|
b684753a35 | ||
|
|
bda54f3296 | ||
|
|
ab38a48599 | ||
|
|
e519a832f6 | ||
|
|
433c5df414 | ||
|
|
225a0781e9 | ||
|
|
52adb5c2f6 | ||
|
|
9869ebe045 | ||
|
|
40ed37b166 | ||
|
|
3cdaff6c66 | ||
|
|
6161febbde | ||
|
|
281c476738 | ||
|
|
78840a1f45 | ||
|
|
6bbc1b4499 | ||
|
|
65e0fcc328 | ||
|
|
e90f822bdd | ||
|
|
22bd13c197 | ||
|
|
059ce5e716 | ||
|
|
38a2805dd8 | ||
|
|
036a7bd2d3 | ||
|
|
b58badec36 | ||
|
|
f7970a542e | ||
|
|
ac6b0a3f14 | ||
|
|
6f87faa9dc | ||
|
|
c58d4b51ef | ||
|
|
9730576a03 | ||
|
|
a6a94e3a18 | ||
|
|
38c368a745 | ||
|
|
f032116961 | ||
|
|
c274c77aa7 | ||
|
|
1c72c88a32 | ||
|
|
f007df06d8 | ||
|
|
6481f725aa | ||
|
|
a3abf194ad | ||
|
|
0b6207c882 | ||
|
|
1bb1309ef8 | ||
|
|
5296e50b6a | ||
|
|
b2b1a1ef8f | ||
|
|
20d04b243b | ||
|
|
7cd94959b9 | ||
|
|
407a4e880d | ||
|
|
d822dc08fe | ||
|
|
5914771636 | ||
|
|
9d1ec10c34 | ||
|
|
7d90d6808d | ||
|
|
14a3512325 | ||
|
|
a186cb43e3 | ||
|
|
74dcf3b5f4 | ||
|
|
a588098fca | ||
|
|
99426d9bb1 | ||
|
|
151c31e550 | ||
|
|
26a37d28ce |
126
.changeset/two-bats-smoke.md
Normal file
126
.changeset/two-bats-smoke.md
Normal file
@@ -0,0 +1,126 @@
|
||||
---
|
||||
"task-master-ai": patch
|
||||
---
|
||||
|
||||
- Adjusts the MCP server invokation in the mcp.json we ship with `task-master init`. Fully functional now.
|
||||
- Rename the npx -y command. It's now `npx -y task-master-ai task-master-mcp`
|
||||
- Rename MCP tools to better align with API conventions and natural language in client chat:
|
||||
- Rename `list-tasks` to `get-tasks` for more intuitive client requests like "get my tasks"
|
||||
- Rename `show-task` to `get-task` for consistency with GET-based API naming conventions
|
||||
|
||||
- **Optimize MCP response payloads:**
|
||||
- Add custom `processTaskResponse` function to `get-task` MCP tool to filter out unnecessary `allTasks` array data
|
||||
- Significantly reduce response size by returning only the specific requested task instead of all tasks
|
||||
- Preserve dependency status relationships for the UI/CLI while keeping MCP responses lean and efficient
|
||||
|
||||
- **Implement complete remove-task functionality:**
|
||||
- Add `removeTask` core function to permanently delete tasks or subtasks from tasks.json
|
||||
- Implement CLI command `remove-task` with confirmation prompt and force flag support
|
||||
- Create MCP `remove_task` tool for AI-assisted task removal
|
||||
- Automatically handle dependency cleanup by removing references to deleted tasks
|
||||
- Update task files after removal to maintain consistency
|
||||
- Provide robust error handling and detailed feedback messages
|
||||
|
||||
- **Update Cursor rules and documentation:**
|
||||
- Enhance `new_features.mdc` with comprehensive guidelines for implementing removal commands
|
||||
- Update `commands.mdc` with best practices for confirmation flows and cleanup procedures
|
||||
- Expand `mcp.mdc` with detailed instructions for MCP tool implementation patterns
|
||||
- Add examples of proper error handling and parameter validation to all relevant rules
|
||||
- Include new sections about handling dependencies during task removal operations
|
||||
- Document naming conventions and implementation patterns for destructive operations
|
||||
|
||||
- **Implement silent mode across all direct functions:**
|
||||
- Add `enableSilentMode` and `disableSilentMode` utility imports to all direct function files
|
||||
- Wrap all core function calls with silent mode to prevent console logs from interfering with JSON responses
|
||||
- Add comprehensive error handling to ensure silent mode is disabled even when errors occur
|
||||
- Fix "Unexpected token 'I', "[INFO] Gene"... is not valid JSON" errors by suppressing log output
|
||||
- Apply consistent silent mode pattern across all MCP direct functions
|
||||
- Maintain clean JSON responses for better integration with client tools
|
||||
|
||||
- **Implement AsyncOperationManager for background task processing:**
|
||||
- Add new `async-manager.js` module to handle long-running operations asynchronously
|
||||
- Support background execution of computationally intensive tasks like expansion and analysis
|
||||
- Implement unique operation IDs with UUID generation for reliable tracking
|
||||
- Add operation status tracking (pending, running, completed, failed)
|
||||
- Create `get_operation_status` MCP tool to check on background task progress
|
||||
- Forward progress reporting from background tasks to the client
|
||||
- Implement operation history with automatic cleanup of completed operations
|
||||
- Support proper error handling in background tasks with detailed status reporting
|
||||
- Maintain context (log, session) for background operations ensuring consistent behavior
|
||||
|
||||
- **Implement initialize_project command:**
|
||||
- Add new MCP tool to allow project setup via integrated MCP clients
|
||||
- Create `initialize_project` direct function with proper parameter handling
|
||||
- Improve onboarding experience by adding to mcp.json configuration
|
||||
- Support project-specific metadata like name, description, and version
|
||||
- Handle shell alias creation with proper confirmation
|
||||
- Improve first-time user experience in AI environments
|
||||
|
||||
- **Refactor project root handling for MCP Server:**
|
||||
- **Prioritize Session Roots**: MCP tools now extract the project root path directly from `session.roots[0].uri` provided by the client (e.g., Cursor).
|
||||
- **New Utility `getProjectRootFromSession`**: Added to `mcp-server/src/tools/utils.js` to encapsulate session root extraction and decoding. **Further refined for more reliable detection, especially in integrated environments, including deriving root from script path and avoiding fallback to '/'.**
|
||||
- **Simplify `findTasksJsonPath`**: The core path finding utility in `mcp-server/src/core/utils/path-utils.js` now prioritizes the `projectRoot` passed in `args` (originating from the session). Removed checks for `TASK_MASTER_PROJECT_ROOT` env var (we do not use this anymore) and package directory fallback. **Enhanced error handling to include detailed debug information (paths searched, CWD, server dir, etc.) and clearer potential solutions when `tasks.json` is not found.**
|
||||
- **Retain CLI Fallbacks**: Kept `lastFoundProjectRoot` cache check and CWD search in `findTasksJsonPath` for compatibility with direct CLI usage.
|
||||
|
||||
- Updated all MCP tools to use the new project root handling:
|
||||
- Tools now call `getProjectRootFromSession` to determine the root.
|
||||
- This root is passed explicitly as `projectRoot` in the `args` object to the corresponding `*Direct` function.
|
||||
- Direct functions continue to use the (now simplified) `findTasksJsonPath` to locate `tasks.json` within the provided root.
|
||||
- This ensures tools work reliably in integrated environments without requiring the user to specify `--project-root`.
|
||||
|
||||
- Add comprehensive PROJECT_MARKERS array for detecting common project files (used in CLI fallback logic).
|
||||
- Improved error messages with specific troubleshooting guidance.
|
||||
- **Enhanced logging:**
|
||||
- Indicate the source of project root selection more clearly.
|
||||
- **Add verbose logging in `get-task.js` to trace session object content and resolved project root path, aiding debugging.**
|
||||
|
||||
- DRY refactoring by centralizing path utilities in `core/utils/path-utils.js` and session handling in `tools/utils.js`.
|
||||
- Keep caching of `lastFoundProjectRoot` for CLI performance.
|
||||
|
||||
- Split monolithic task-master-core.js into separate function files within direct-functions directory.
|
||||
- Implement update-task MCP command for updating a single task by ID.
|
||||
- Implement update-subtask MCP command for appending information to specific subtasks.
|
||||
- Implement generate MCP command for creating individual task files from tasks.json.
|
||||
- Implement set-status MCP command for updating task status.
|
||||
- Implement get-task MCP command for displaying detailed task information (renamed from show-task).
|
||||
- Implement next-task MCP command for finding the next task to work on.
|
||||
- Implement expand-task MCP command for breaking down tasks into subtasks.
|
||||
- Implement add-task MCP command for creating new tasks using AI assistance.
|
||||
- Implement add-subtask MCP command for adding subtasks to existing tasks.
|
||||
- Implement remove-subtask MCP command for removing subtasks from parent tasks.
|
||||
- Implement expand-all MCP command for expanding all tasks into subtasks.
|
||||
- Implement analyze-complexity MCP command for analyzing task complexity.
|
||||
- Implement clear-subtasks MCP command for clearing subtasks from parent tasks.
|
||||
- Implement remove-dependency MCP command for removing dependencies from tasks.
|
||||
- Implement validate-dependencies MCP command for checking validity of task dependencies.
|
||||
- Implement fix-dependencies MCP command for automatically fixing invalid dependencies.
|
||||
- Implement complexity-report MCP command for displaying task complexity analysis reports.
|
||||
- Implement add-dependency MCP command for creating dependency relationships between tasks.
|
||||
- Implement get-tasks MCP command for listing all tasks (renamed from list-tasks).
|
||||
- Implement `initialize_project` MCP tool to allow project setup via MCP client and radically improve and simplify onboarding by adding to mcp.json (e.g., Cursor).
|
||||
|
||||
- Enhance documentation and tool descriptions:
|
||||
- Create new `taskmaster.mdc` Cursor rule for comprehensive MCP tool and CLI command reference.
|
||||
- Bundle taskmaster.mdc with npm package and include in project initialization.
|
||||
- Add detailed descriptions for each tool's purpose, parameters, and common use cases.
|
||||
- Include natural language patterns and keywords for better intent recognition.
|
||||
- Document parameter descriptions with clear examples and default values.
|
||||
- Add usage examples and context for each command/tool.
|
||||
- **Update documentation (`mcp.mdc`, `utilities.mdc`, `architecture.mdc`, `new_features.mdc`, `commands.mdc`) to reflect the new session-based project root handling and the preferred MCP vs. CLI interaction model.**
|
||||
- Improve clarity around project root auto-detection in tool documentation.
|
||||
- Update tool descriptions to better reflect their actual behavior and capabilities.
|
||||
- Add cross-references between related tools and commands.
|
||||
- Include troubleshooting guidance in tool descriptions.
|
||||
- **Add default values for `DEFAULT_SUBTASKS` and `DEFAULT_PRIORITY` to the example `.cursor/mcp.json` configuration.**
|
||||
|
||||
- Document MCP server naming conventions in architecture.mdc and mcp.mdc files (file names use kebab-case, direct functions use camelCase with Direct suffix, tool registration functions use camelCase with Tool suffix, and MCP tool names use snake_case).
|
||||
- Update MCP tool naming to follow more intuitive conventions that better align with natural language requests in client chat applications.
|
||||
- Enhance task show view with a color-coded progress bar for visualizing subtask completion percentage.
|
||||
- Add "cancelled" status to UI module status configurations for marking tasks as cancelled without deletion.
|
||||
- Improve MCP server resource documentation with comprehensive implementation examples and best practices.
|
||||
- Enhance progress bars with status breakdown visualization showing proportional sections for different task statuses.
|
||||
- Add improved status tracking for both tasks and subtasks with detailed counts by status.
|
||||
- Optimize progress bar display with width constraints to prevent UI overflow on smaller terminals.
|
||||
- Improve status counts display with clear text labels beside status icons for better readability.
|
||||
- Treat deferred and cancelled tasks as effectively complete for progress calculation while maintaining visual distinction.
|
||||
- **Fix `reportProgress` calls** to use the correct `{ progress, total? }` format.
|
||||
@@ -4,7 +4,17 @@
|
||||
"command": "node",
|
||||
"args": [
|
||||
"./mcp-server/server.js"
|
||||
]
|
||||
],
|
||||
"env": {
|
||||
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
|
||||
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
|
||||
"MODEL": "claude-3-7-sonnet-20250219",
|
||||
"PERPLEXITY_MODEL": "sonar-pro",
|
||||
"MAX_TOKENS": 64000,
|
||||
"TEMPERATURE": 0.4,
|
||||
"DEFAULT_SUBTASKS": 5,
|
||||
"DEFAULT_PRIORITY": "medium"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -85,7 +85,7 @@ alwaysApply: false
|
||||
- `parsePRDWithAI(prdContent)`: Extracts tasks from PRD content using AI.
|
||||
|
||||
- **[`utils.js`](mdc:scripts/modules/utils.js): Utility Functions and Configuration**
|
||||
- **Purpose**: Provides reusable utility functions and global configuration settings used across the application.
|
||||
- **Purpose**: Provides reusable utility functions and global configuration settings used across the **CLI application**.
|
||||
- **Responsibilities** (See also: [`utilities.mdc`](mdc:.cursor/rules/utilities.mdc)):
|
||||
- Manages global configuration settings loaded from environment variables and defaults.
|
||||
- Implements logging utility with different log levels and output formatting.
|
||||
@@ -93,6 +93,7 @@ alwaysApply: false
|
||||
- Includes string manipulation utilities (e.g., `truncate`, `sanitizePrompt`).
|
||||
- Offers task-specific utility functions (e.g., `formatTaskId`, `findTaskById`, `taskExists`).
|
||||
- Implements graph algorithms like cycle detection for dependency management.
|
||||
- **Silent Mode Control**: Provides `enableSilentMode` and `disableSilentMode` functions to control log output.
|
||||
- **Key Components**:
|
||||
- `CONFIG`: Global configuration object.
|
||||
- `log(level, ...args)`: Logging function.
|
||||
@@ -100,19 +101,52 @@ alwaysApply: false
|
||||
- `truncate(text, maxLength)`: String truncation utility.
|
||||
- `formatTaskId(id)` / `findTaskById(tasks, taskId)`: Task ID and search utilities.
|
||||
- `findCycles(subtaskId, dependencyMap)`: Cycle detection algorithm.
|
||||
- `enableSilentMode()` / `disableSilentMode()`: Control console logging output.
|
||||
|
||||
- **[`mcp-server/`](mdc:mcp-server/): MCP Server Integration**
|
||||
- **Purpose**: Provides an MCP (Model Context Protocol) interface for Task Master, allowing integration with external tools like Cursor. Uses FastMCP framework.
|
||||
- **Responsibilities** (See also: [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc)):
|
||||
- Registers Task Master functionalities as tools consumable via MCP.
|
||||
- Handles MCP requests and translates them into calls to the Task Master core logic.
|
||||
- Prefers direct function calls to core modules via [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js) for performance.
|
||||
- Uses CLI execution via `executeTaskMasterCommand` as a fallback.
|
||||
- **Implements Caching**: Utilizes a caching layer (`ContextManager` with `lru-cache`) invoked via `getCachedOrExecute` within direct function wrappers ([`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js)) to optimize performance for specific read operations (e.g., listing tasks).
|
||||
- Standardizes response formatting for MCP clients using utilities in [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js).
|
||||
- Handles MCP requests via tool `execute` methods defined in `mcp-server/src/tools/*.js`.
|
||||
- Tool `execute` methods call corresponding **direct function wrappers**.
|
||||
- Tool `execute` methods use `getProjectRootFromSession` (from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js)) to determine the project root from the client session and pass it to the direct function.
|
||||
- **Direct function wrappers (`*Direct` functions in `mcp-server/src/core/direct-functions/*.js`) contain the main logic for handling MCP requests**, including path resolution, argument validation, caching, and calling core Task Master functions.
|
||||
- Direct functions use `findTasksJsonPath` (from [`core/utils/path-utils.js`](mdc:mcp-server/src/core/utils/path-utils.js)) to locate `tasks.json` based on the provided `projectRoot`.
|
||||
- **Silent Mode Implementation**: Direct functions use `enableSilentMode` and `disableSilentMode` to prevent logs from interfering with JSON responses.
|
||||
- **Async Operations**: Uses `AsyncOperationManager` to handle long-running operations in the background.
|
||||
- **Project Initialization**: Provides `initialize_project` command for setting up new projects from within integrated clients.
|
||||
- Tool `execute` methods use `handleApiResult` from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js) to process the result from the direct function and format the final MCP response.
|
||||
- Uses CLI execution via `executeTaskMasterCommand` as a fallback only when necessary.
|
||||
- **Implements Robust Path Finding**: The utility [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js) (specifically `getProjectRootFromSession`) and [`core/utils/path-utils.js`](mdc:mcp-server/src/core/utils/path-utils.js) (specifically `findTasksJsonPath`) work together. The tool gets the root via session, passes it to the direct function, which uses `findTasksJsonPath` to locate the specific `tasks.json` file within that root.
|
||||
- **Implements Caching**: Utilizes a caching layer (`ContextManager` with `lru-cache`). Caching logic is invoked *within* the direct function wrappers using the `getCachedOrExecute` utility for performance-sensitive read operations.
|
||||
- Standardizes response formatting and data filtering using utilities in [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js).
|
||||
- **Resource Management**: Provides access to static and dynamic resources.
|
||||
- **Key Components**:
|
||||
- `mcp-server/src/index.js`: Main server class definition with FastMCP initialization, resource registration, and server lifecycle management.
|
||||
- `mcp-server/src/server.js`: Main server setup and initialization.
|
||||
- `mcp-server/src/tools/`: Directory containing individual tool definitions, each registering a specific Task Master command for MCP.
|
||||
- `mcp-server/src/tools/`: Directory containing individual tool definitions. Each tool's `execute` method orchestrates the call to core logic and handles the response.
|
||||
- `mcp-server/src/tools/utils.js`: Provides MCP-specific utilities like `handleApiResult`, `processMCPResponseData`, `getCachedOrExecute`, and **`getProjectRootFromSession`**.
|
||||
- `mcp-server/src/core/utils/`: Directory containing utility functions specific to the MCP server, like **`path-utils.js` for resolving `tasks.json` within a given root** and **`async-manager.js` for handling background operations**.
|
||||
- `mcp-server/src/core/direct-functions/`: Directory containing individual files for each **direct function wrapper (`*Direct`)**. These files contain the primary logic for MCP tool execution.
|
||||
- `mcp-server/src/core/resources/`: Directory containing resource handlers for task templates, workflow definitions, and other static/dynamic data exposed to LLM clients.
|
||||
- [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js): Acts as an import/export hub, collecting and exporting direct functions from the `direct-functions` directory and MCP utility functions.
|
||||
- **Naming Conventions**:
|
||||
- **Files** use **kebab-case**: `list-tasks.js`, `set-task-status.js`, `parse-prd.js`
|
||||
- **Direct Functions** use **camelCase** with `Direct` suffix: `listTasksDirect`, `setTaskStatusDirect`, `parsePRDDirect`
|
||||
- **Tool Registration Functions** use **camelCase** with `Tool` suffix: `registerListTasksTool`, `registerSetTaskStatusTool`
|
||||
- **MCP Tool Names** use **snake_case**: `list_tasks`, `set_task_status`, `parse_prd_document`
|
||||
- **Resource Handlers** use **camelCase** with pattern URI: `@mcp.resource("tasks://templates/{template_id}")`
|
||||
- **AsyncOperationManager**:
|
||||
- **Purpose**: Manages background execution of long-running operations.
|
||||
- **Location**: `mcp-server/src/core/utils/async-manager.js`
|
||||
- **Key Features**:
|
||||
- Operation tracking with unique IDs using UUID
|
||||
- Status management (pending, running, completed, failed)
|
||||
- Progress reporting forwarded from background tasks
|
||||
- Operation history with automatic cleanup of completed operations
|
||||
- Context preservation (log, session, reportProgress)
|
||||
- Robust error handling for background tasks
|
||||
- **Usage**: Used for CPU-intensive operations like task expansion and PRD parsing
|
||||
|
||||
- **Data Flow and Module Dependencies**:
|
||||
|
||||
@@ -121,7 +155,7 @@ alwaysApply: false
|
||||
- **UI for Presentation**: [`ui.js`](mdc:scripts/modules/ui.js) is used by command handlers and task/dependency managers to display information to the user. UI functions primarily consume data and format it for output, without modifying core application state.
|
||||
- **Utilities for Common Tasks**: [`utils.js`](mdc:scripts/modules/utils.js) provides helper functions used by all other modules for configuration, logging, file operations, and common data manipulations.
|
||||
- **AI Services Integration**: AI functionalities (complexity analysis, task expansion, PRD parsing) are invoked from [`task-manager.js`](mdc:scripts/modules/task-manager.js) and potentially [`commands.js`](mdc:scripts/modules/commands.js), likely using functions that would reside in a dedicated `ai-services.js` module or be integrated within `utils.js` or `task-manager.js`.
|
||||
- **MCP Server Interaction**: External tools interact with the `mcp-server`, which then calls direct function wrappers in `task-master-core.js` or falls back to `executeTaskMasterCommand`. Responses are formatted by `mcp-server/src/tools/utils.js`. See [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) for details.
|
||||
- **MCP Server Interaction**: External tools interact with the `mcp-server`. MCP Tool `execute` methods use `getProjectRootFromSession` to find the project root, then call direct function wrappers (in `mcp-server/src/core/direct-functions/`) passing the root in `args`. These wrappers handle path finding for `tasks.json` (using `path-utils.js`), validation, caching, call the core logic from `scripts/modules/`, and return a standardized result. The final MCP response is formatted by `mcp-server/src/tools/utils.js`. See [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) for details.
|
||||
|
||||
- **Testing Architecture**:
|
||||
|
||||
@@ -164,3 +198,67 @@ alwaysApply: false
|
||||
- **Clarity**: The modular structure provides a clear separation of concerns, making the codebase easier to navigate and understand for developers.
|
||||
|
||||
This architectural overview should help AI models understand the structure and organization of the Task Master CLI codebase, enabling them to more effectively assist with code generation, modification, and understanding.
|
||||
|
||||
## Implementing MCP Support for a Command
|
||||
|
||||
Follow these steps to add MCP support for an existing Task Master command (see [`new_features.mdc`](mdc:.cursor/rules/new_features.mdc) for more detail):
|
||||
|
||||
1. **Ensure Core Logic Exists**: Verify the core functionality is implemented and exported from the relevant module in `scripts/modules/`.
|
||||
|
||||
2. **Create Direct Function File in `mcp-server/src/core/direct-functions/`**:
|
||||
- Create a new file (e.g., `your-command.js`) using **kebab-case** naming.
|
||||
- Import necessary core functions, **`findTasksJsonPath` from `../utils/path-utils.js`**, and **silent mode utilities**.
|
||||
- Implement `async function yourCommandDirect(args, log)` using **camelCase** with `Direct` suffix:
|
||||
- **Path Resolution**: Obtain the tasks file path using `const tasksPath = findTasksJsonPath(args, log);`. This relies on `args.projectRoot` being provided.
|
||||
- Parse other `args` and perform necessary validation.
|
||||
- **Implement Silent Mode**: Wrap core function calls with `enableSilentMode()` and `disableSilentMode()`.
|
||||
- Implement caching with `getCachedOrExecute` if applicable.
|
||||
- Call core logic.
|
||||
- Return `{ success: true/false, data/error, fromCache: boolean }`.
|
||||
- Export the wrapper function.
|
||||
|
||||
3. **Update `task-master-core.js` with Import/Export**: Add imports/exports for the new `*Direct` function.
|
||||
|
||||
4. **Create MCP Tool (`mcp-server/src/tools/`)**:
|
||||
- Create a new file (e.g., `your-command.js`) using **kebab-case**.
|
||||
- Import `zod`, `handleApiResult`, **`getProjectRootFromSession`**, and your `yourCommandDirect` function.
|
||||
- Implement `registerYourCommandTool(server)`.
|
||||
- **Define parameters, making `projectRoot` optional**: `projectRoot: z.string().optional().describe(...)`.
|
||||
- Consider if this operation should run in the background using `AsyncOperationManager`.
|
||||
- Implement the standard `execute` method:
|
||||
- Get `rootFolder` using `getProjectRootFromSession` (with fallback to `args.projectRoot`).
|
||||
- Call `yourCommandDirect({ ...args, projectRoot: rootFolder }, log)` or use `asyncOperationManager.addOperation`.
|
||||
- Pass the result to `handleApiResult`.
|
||||
|
||||
5. **Register Tool**: Import and call `registerYourCommandTool` in `mcp-server/src/tools/index.js`.
|
||||
|
||||
6. **Update `mcp.json`**: Add the new tool definition.
|
||||
|
||||
## Project Initialization
|
||||
|
||||
The `initialize_project` command provides a way to set up a new Task Master project:
|
||||
|
||||
- **CLI Command**: `task-master init`
|
||||
- **MCP Tool**: `initialize_project`
|
||||
- **Functionality**:
|
||||
- Creates necessary directories and files for a new project
|
||||
- Sets up `tasks.json` and initial task files
|
||||
- Configures project metadata (name, description, version)
|
||||
- Handles shell alias creation if requested
|
||||
- Works in both interactive and non-interactive modes
|
||||
|
||||
## Async Operation Management
|
||||
|
||||
The AsyncOperationManager provides background task execution capabilities:
|
||||
|
||||
- **Location**: `mcp-server/src/core/utils/async-manager.js`
|
||||
- **Key Components**:
|
||||
- `asyncOperationManager` singleton instance
|
||||
- `addOperation(operationFn, args, context)` method
|
||||
- `getStatus(operationId)` method
|
||||
- **Usage Flow**:
|
||||
1. Client calls an MCP tool that may take time to complete
|
||||
2. Tool uses AsyncOperationManager to run the operation in background
|
||||
3. Tool returns immediate response with operation ID
|
||||
4. Client polls `get_operation_status` tool with the ID
|
||||
5. Once completed, client can access operation results
|
||||
105
.cursor/rules/changeset.mdc
Normal file
105
.cursor/rules/changeset.mdc
Normal file
@@ -0,0 +1,105 @@
|
||||
---
|
||||
description: Guidelines for using Changesets (npm run changeset) to manage versioning and changelogs.
|
||||
alwaysApply: true
|
||||
---
|
||||
|
||||
# Changesets Workflow Guidelines
|
||||
|
||||
Changesets is used to manage package versioning and generate accurate `CHANGELOG.md` files automatically. It's crucial to use it correctly after making meaningful changes that affect the package from an external perspective or significantly impact internal development workflow documented elsewhere.
|
||||
|
||||
## When to Run Changeset
|
||||
|
||||
- Run `npm run changeset` (or `npx changeset add`) **after** you have staged (`git add .`) a logical set of changes that should be communicated in the next release's `CHANGELOG.md`.
|
||||
- This typically includes:
|
||||
- **New Features** (Backward-compatible additions)
|
||||
- **Bug Fixes** (Fixes to existing functionality)
|
||||
- **Breaking Changes** (Changes that are not backward-compatible)
|
||||
- **Performance Improvements** (Enhancements to speed or resource usage)
|
||||
- **Significant Refactoring** (Major code restructuring, even if external behavior is unchanged, as it might affect stability or maintainability) - *Such as reorganizing the MCP server's direct function implementations into separate files*
|
||||
- **User-Facing Documentation Updates** (Changes to README, usage guides, public API docs)
|
||||
- **Dependency Updates** (Especially if they fix known issues or introduce significant changes)
|
||||
- **Build/Tooling Changes** (If they affect how consumers might build or interact with the package)
|
||||
- **Every Pull Request** containing one or more of the above change types **should include a changeset file**.
|
||||
|
||||
## What NOT to Add a Changeset For
|
||||
|
||||
Avoid creating changesets for changes that have **no impact or relevance to external consumers** of the `task-master` package or contributors following **public-facing documentation**. Examples include:
|
||||
|
||||
- **Internal Documentation Updates:** Changes *only* to files within `.cursor/rules/` that solely guide internal development practices for this specific repository.
|
||||
- **Trivial Chores:** Very minor code cleanup, adding comments that don't clarify behavior, typo fixes in non-user-facing code or internal docs.
|
||||
- **Non-Impactful Test Updates:** Minor refactoring of tests, adding tests for existing functionality without fixing bugs.
|
||||
- **Local Configuration Changes:** Updates to personal editor settings, local `.env` files, etc.
|
||||
|
||||
**Rule of Thumb:** If a user installing or using the `task-master` package wouldn't care about the change, or if a contributor following the main README wouldn't need to know about it for their workflow, you likely don't need a changeset.
|
||||
|
||||
## How to Run and What It Asks
|
||||
|
||||
1. **Run the command**:
|
||||
```bash
|
||||
npm run changeset
|
||||
# or
|
||||
npx changeset add
|
||||
```
|
||||
2. **Select Packages**: It will prompt you to select the package(s) affected by your changes using arrow keys and spacebar. If this is not a monorepo, select the main package.
|
||||
3. **Select Bump Type**: Choose the appropriate semantic version bump for **each** selected package:
|
||||
* **`Major`**: For **breaking changes**. Use sparingly.
|
||||
* **`Minor`**: For **new features**.
|
||||
* **`Patch`**: For **bug fixes**, performance improvements, **user-facing documentation changes**, significant refactoring, relevant dependency updates, or impactful build/tooling changes.
|
||||
4. **Enter Summary**: Provide a concise summary of the changes **for the `CHANGELOG.md`**.
|
||||
* **Purpose**: This message is user-facing and explains *what* changed in the release.
|
||||
* **Format**: Use the imperative mood (e.g., "Add feature X", "Fix bug Y", "Update README setup instructions"). Keep it brief, typically a single line.
|
||||
* **Audience**: Think about users installing/updating the package or developers consuming its public API/CLI.
|
||||
* **Not a Git Commit Message**: This summary is *different* from your detailed Git commit message.
|
||||
|
||||
## Changeset Summary vs. Git Commit Message
|
||||
|
||||
- **Changeset Summary**:
|
||||
- **Audience**: Users/Consumers of the package (reads `CHANGELOG.md`).
|
||||
- **Purpose**: Briefly describe *what* changed in the released version that is relevant to them.
|
||||
- **Format**: Concise, imperative mood, single line usually sufficient.
|
||||
- **Example**: `Fix dependency resolution bug in 'next' command.`
|
||||
- **Git Commit Message**:
|
||||
- **Audience**: Developers browsing the Git history of *this* repository.
|
||||
- **Purpose**: Explain *why* the change was made, the context, and the implementation details (can include internal context).
|
||||
- **Format**: Follows commit conventions (e.g., Conventional Commits), can be multi-line with a subject and body.
|
||||
- **Example**:
|
||||
```
|
||||
fix(deps): Correct dependency lookup in 'next' command
|
||||
|
||||
The logic previously failed to account for subtask dependencies when
|
||||
determining the next available task. This commit refactors the
|
||||
dependency check in `findNextTask` within `task-manager.js` to
|
||||
correctly traverse both direct and subtask dependencies. Added
|
||||
unit tests to cover this specific scenario.
|
||||
```
|
||||
- ✅ **DO**: Provide *both* a concise changeset summary (when appropriate) *and* a detailed Git commit message.
|
||||
- ❌ **DON'T**: Use your detailed Git commit message body as the changeset summary.
|
||||
- ❌ **DON'T**: Skip running `changeset` for user-relevant changes just because you wrote a good commit message.
|
||||
|
||||
## The `.changeset` File
|
||||
|
||||
- Running the command creates a unique markdown file in the `.changeset/` directory (e.g., `.changeset/random-name.md`).
|
||||
- This file contains the bump type information and the summary you provided.
|
||||
- **This file MUST be staged and committed** along with your relevant code changes.
|
||||
|
||||
## Standard Workflow Sequence (When a Changeset is Needed)
|
||||
|
||||
1. Make your code or relevant documentation changes.
|
||||
2. Stage your changes: `git add .`
|
||||
3. Run changeset: `npm run changeset`
|
||||
* Select package(s).
|
||||
* Select bump type (`Patch`, `Minor`, `Major`).
|
||||
* Enter the **concise summary** for the changelog.
|
||||
4. Stage the generated changeset file: `git add .changeset/*.md`
|
||||
5. Commit all staged changes (code + changeset file) using your **detailed Git commit message**:
|
||||
```bash
|
||||
git commit -m "feat(module): Add new feature X..."
|
||||
```
|
||||
|
||||
## Release Process (Context)
|
||||
|
||||
- The generated `.changeset/*.md` files are consumed later during the release process.
|
||||
- Commands like `changeset version` read these files, update `package.json` versions, update the `CHANGELOG.md`, and delete the individual changeset files.
|
||||
- Commands like `changeset publish` then publish the new versions to npm.
|
||||
|
||||
Following this workflow ensures that versioning is consistent and changelogs are automatically and accurately generated based on the contributions made.
|
||||
@@ -6,6 +6,16 @@ alwaysApply: false
|
||||
|
||||
# Command-Line Interface Implementation Guidelines
|
||||
|
||||
**Note on Interaction Method:**
|
||||
|
||||
While this document details the implementation of Task Master's **CLI commands**, the **preferred method for interacting with Task Master in integrated environments (like Cursor) is through the MCP server tools**.
|
||||
|
||||
- **Use MCP Tools First**: Always prefer using the MCP tools (e.g., `get_tasks`, `add_task`) when interacting programmatically or via an integrated tool. They offer better performance, structured data, and richer error handling. See [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc) for a comprehensive list of MCP tools and their corresponding CLI commands.
|
||||
- **CLI as Fallback/User Interface**: The `task-master` CLI commands described here are primarily intended for:
|
||||
- Direct user interaction in the terminal.
|
||||
- A fallback mechanism if the MCP server is unavailable or a specific functionality is not exposed via an MCP tool.
|
||||
- **Implementation Context**: This document (`commands.mdc`) focuses on the standards for *implementing* the CLI commands using Commander.js within the [`commands.js`](mdc:scripts/modules/commands.js) module.
|
||||
|
||||
## Command Structure Standards
|
||||
|
||||
- **Basic Command Template**:
|
||||
@@ -27,6 +37,126 @@ alwaysApply: false
|
||||
- ✅ DO: Include validation for required parameters
|
||||
- ❌ DON'T: Implement business logic in command handlers
|
||||
|
||||
## Best Practices for Removal/Delete Commands
|
||||
|
||||
When implementing commands that delete or remove data (like `remove-task` or `remove-subtask`), follow these specific guidelines:
|
||||
|
||||
- **Confirmation Prompts**:
|
||||
- ✅ **DO**: Include a confirmation prompt by default for destructive operations
|
||||
- ✅ **DO**: Provide a `--yes` or `-y` flag to skip confirmation for scripting/automation
|
||||
- ✅ **DO**: Show what will be deleted in the confirmation message
|
||||
- ❌ **DON'T**: Perform destructive operations without user confirmation unless explicitly overridden
|
||||
|
||||
```javascript
|
||||
// ✅ DO: Include confirmation for destructive operations
|
||||
programInstance
|
||||
.command('remove-task')
|
||||
.description('Remove a task or subtask permanently')
|
||||
.option('-i, --id <id>', 'ID of the task to remove')
|
||||
.option('-y, --yes', 'Skip confirmation prompt', false)
|
||||
.action(async (options) => {
|
||||
// Validation code...
|
||||
|
||||
if (!options.yes) {
|
||||
const confirm = await inquirer.prompt([{
|
||||
type: 'confirm',
|
||||
name: 'proceed',
|
||||
message: `Are you sure you want to permanently delete task ${taskId}? This cannot be undone.`,
|
||||
default: false
|
||||
}]);
|
||||
|
||||
if (!confirm.proceed) {
|
||||
console.log(chalk.yellow('Operation cancelled.'));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Proceed with removal...
|
||||
});
|
||||
```
|
||||
|
||||
- **File Path Handling**:
|
||||
- ✅ **DO**: Use `path.join()` to construct file paths
|
||||
- ✅ **DO**: Follow established naming conventions for tasks (e.g., `task_001.txt`)
|
||||
- ✅ **DO**: Check if files exist before attempting to delete them
|
||||
- ✅ **DO**: Handle file deletion errors gracefully
|
||||
- ❌ **DON'T**: Construct paths with string concatenation
|
||||
|
||||
```javascript
|
||||
// ✅ DO: Properly construct file paths
|
||||
const taskFilePath = path.join(
|
||||
path.dirname(tasksPath),
|
||||
`task_${taskId.toString().padStart(3, '0')}.txt`
|
||||
);
|
||||
|
||||
// ✅ DO: Check existence before deletion
|
||||
if (fs.existsSync(taskFilePath)) {
|
||||
try {
|
||||
fs.unlinkSync(taskFilePath);
|
||||
console.log(chalk.green(`Task file deleted: ${taskFilePath}`));
|
||||
} catch (error) {
|
||||
console.warn(chalk.yellow(`Could not delete task file: ${error.message}`));
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- **Clean Up References**:
|
||||
- ✅ **DO**: Clean up references to the deleted item in other parts of the data
|
||||
- ✅ **DO**: Handle both direct and indirect references
|
||||
- ✅ **DO**: Explain what related data is being updated
|
||||
- ❌ **DON'T**: Leave dangling references
|
||||
|
||||
```javascript
|
||||
// ✅ DO: Clean up references when deleting items
|
||||
console.log(chalk.blue('Cleaning up task dependencies...'));
|
||||
let referencesRemoved = 0;
|
||||
|
||||
// Update dependencies in other tasks
|
||||
data.tasks.forEach(task => {
|
||||
if (task.dependencies && task.dependencies.includes(taskId)) {
|
||||
task.dependencies = task.dependencies.filter(depId => depId !== taskId);
|
||||
referencesRemoved++;
|
||||
}
|
||||
});
|
||||
|
||||
if (referencesRemoved > 0) {
|
||||
console.log(chalk.green(`Removed ${referencesRemoved} references to task ${taskId} from other tasks`));
|
||||
}
|
||||
```
|
||||
|
||||
- **Task File Regeneration**:
|
||||
- ✅ **DO**: Regenerate task files after destructive operations
|
||||
- ✅ **DO**: Pass all required parameters to generation functions
|
||||
- ✅ **DO**: Provide an option to skip regeneration if needed
|
||||
- ❌ **DON'T**: Assume default parameters will work
|
||||
|
||||
```javascript
|
||||
// ✅ DO: Properly regenerate files after deletion
|
||||
if (!options.skipGenerate) {
|
||||
console.log(chalk.blue('Regenerating task files...'));
|
||||
try {
|
||||
// Note both parameters are explicitly provided
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
console.log(chalk.green('Task files regenerated successfully'));
|
||||
} catch (error) {
|
||||
console.warn(chalk.yellow(`Warning: Could not regenerate task files: ${error.message}`));
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- **Alternative Suggestions**:
|
||||
- ✅ **DO**: Suggest non-destructive alternatives when appropriate
|
||||
- ✅ **DO**: Explain the difference between deletion and status changes
|
||||
- ✅ **DO**: Include examples of alternative commands
|
||||
|
||||
```javascript
|
||||
// ✅ DO: Suggest alternatives for destructive operations
|
||||
console.log(chalk.yellow('Note: If you just want to exclude this task from active work, consider:'));
|
||||
console.log(chalk.cyan(` task-master set-status --id=${taskId} --status=cancelled`));
|
||||
console.log(chalk.cyan(` task-master set-status --id=${taskId} --status=deferred`));
|
||||
console.log('This preserves the task and its history for reference.');
|
||||
```
|
||||
|
||||
## Option Naming Conventions
|
||||
|
||||
- **Command Names**:
|
||||
|
||||
@@ -1,223 +1,125 @@
|
||||
---
|
||||
description: Guide for using meta-development script (scripts/dev.js) to manage task-driven development workflows
|
||||
description: Guide for using Task Master to manage task-driven development workflows
|
||||
globs: **/*
|
||||
alwaysApply: true
|
||||
---
|
||||
|
||||
- **Global CLI Commands**
|
||||
- Task Master now provides a global CLI through the `task-master` command (See [`commands.mdc`](mdc:.cursor/rules/commands.mdc) for details)
|
||||
- All functionality from `scripts/dev.js` is available through this interface
|
||||
- Install globally with `npm install -g claude-task-master` or use locally via `npx`
|
||||
- Use `task-master <command>` instead of `node scripts/dev.js <command>`
|
||||
- Examples:
|
||||
- `task-master list`
|
||||
- `task-master next`
|
||||
- `task-master expand --id=3`
|
||||
- All commands accept the same options as their script equivalents
|
||||
- The CLI (`task-master`) is the **primary** way for users to interact with the application.
|
||||
# Task Master Development Workflow
|
||||
|
||||
- **Development Workflow Process**
|
||||
- Start new projects by running `task-master init` or `node scripts/dev.js parse-prd --input=<prd-file.txt>` to generate initial tasks.json
|
||||
- Begin coding sessions with `task-master list` to see current tasks, status, and IDs
|
||||
- Analyze task complexity with `task-master analyze-complexity --research` before breaking down tasks
|
||||
This guide outlines the typical process for using Task Master to manage software development projects.
|
||||
|
||||
## Primary Interaction: MCP Server vs. CLI
|
||||
|
||||
Task Master offers two primary ways to interact:
|
||||
|
||||
1. **MCP Server (Recommended for Integrated Tools)**:
|
||||
- For AI agents and integrated development environments (like Cursor), interacting via the **MCP server is the preferred method**.
|
||||
- The MCP server exposes Task Master functionality through a set of tools (e.g., `get_tasks`, `add_subtask`).
|
||||
- This method offers better performance, structured data exchange, and richer error handling compared to CLI parsing.
|
||||
- Refer to [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) for details on the MCP architecture and available tools.
|
||||
- A comprehensive list and description of MCP tools and their corresponding CLI commands can be found in [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc).
|
||||
- **Restart the MCP server** if core logic in `scripts/modules` or MCP tool/direct function definitions change.
|
||||
|
||||
2. **`task-master` CLI (For Users & Fallback)**:
|
||||
- The global `task-master` command provides a user-friendly interface for direct terminal interaction.
|
||||
- It can also serve as a fallback if the MCP server is inaccessible or a specific function isn't exposed via MCP.
|
||||
- Install globally with `npm install -g task-master-ai` or use locally via `npx task-master-ai ...`.
|
||||
- The CLI commands often mirror the MCP tools (e.g., `task-master list` corresponds to `get_tasks`).
|
||||
- Refer to [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc) for a detailed command reference.
|
||||
|
||||
## Standard Development Workflow Process
|
||||
|
||||
- Start new projects by running `init` tool / `task-master init` or `parse_prd` / `task-master parse-prd --input=<prd-file.txt>` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to generate initial tasks.json
|
||||
- Begin coding sessions with `get_tasks` / `task-master list` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to see current tasks, status, and IDs
|
||||
- Determine the next task to work on using `next_task` / `task-master next` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)).
|
||||
- Analyze task complexity with `analyze_complexity` / `task-master analyze-complexity --research` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) before breaking down tasks
|
||||
- Review complexity report using `complexity_report` / `task-master complexity-report` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)).
|
||||
- Select tasks based on dependencies (all marked 'done'), priority level, and ID order
|
||||
- Clarify tasks by checking task files in tasks/ directory or asking for user input
|
||||
- View specific task details using `task-master show <id>` to understand implementation requirements
|
||||
- Break down complex tasks using `task-master expand --id=<id>` with appropriate flags
|
||||
- Clear existing subtasks if needed using `task-master clear-subtasks --id=<id>` before regenerating
|
||||
- View specific task details using `get_task` / `task-master show <id>` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to understand implementation requirements
|
||||
- Break down complex tasks using `expand_task` / `task-master expand --id=<id>` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) with appropriate flags
|
||||
- Clear existing subtasks if needed using `clear_subtasks` / `task-master clear-subtasks --id=<id>` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) before regenerating
|
||||
- Implement code following task details, dependencies, and project standards
|
||||
- Verify tasks according to test strategies before marking as complete
|
||||
- Mark completed tasks with `task-master set-status --id=<id> --status=done`
|
||||
- Update dependent tasks when implementation differs from original plan
|
||||
- Generate task files with `task-master generate` after updating tasks.json
|
||||
- Maintain valid dependency structure with `task-master fix-dependencies` when needed
|
||||
- Verify tasks according to test strategies before marking as complete (See [`tests.mdc`](mdc:.cursor/rules/tests.mdc))
|
||||
- Mark completed tasks with `set_task_status` / `task-master set-status --id=<id> --status=done` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc))
|
||||
- Update dependent tasks when implementation differs from original plan using `update` / `task-master update --from=<id> --prompt="..."` or `update_task` / `task-master update-task --id=<id> --prompt="..."` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc))
|
||||
- Add new tasks discovered during implementation using `add_task` / `task-master add-task --prompt="..."` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)).
|
||||
- Add new subtasks as needed using `add_subtask` / `task-master add-subtask --parent=<id> --title="..."` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)).
|
||||
- Append notes or details to subtasks using `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt="..."` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)).
|
||||
- Generate task files with `generate` / `task-master generate` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) after updating tasks.json
|
||||
- Maintain valid dependency structure with `add_dependency`/`remove_dependency` tools or `task-master add-dependency`/`remove-dependency` commands, `validate_dependencies` / `task-master validate-dependencies`, and `fix_dependencies` / `task-master fix-dependencies` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) when needed
|
||||
- Respect dependency chains and task priorities when selecting work
|
||||
- **MCP Server**: For integrations (like Cursor), interact via the MCP server which prefers direct function calls. Restart the MCP server if core logic in `scripts/modules` changes. See [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc).
|
||||
- Report progress regularly using the list command
|
||||
- Report progress regularly using `get_tasks` / `task-master list`
|
||||
|
||||
- **Task Complexity Analysis**
|
||||
- Run `node scripts/dev.js analyze-complexity --research` for comprehensive analysis
|
||||
- Review complexity report in scripts/task-complexity-report.json
|
||||
- Or use `node scripts/dev.js complexity-report` for a formatted, readable version of the report
|
||||
## Task Complexity Analysis
|
||||
|
||||
- Run `analyze_complexity` / `task-master analyze-complexity --research` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) for comprehensive analysis
|
||||
- Review complexity report via `complexity_report` / `task-master complexity-report` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) for a formatted, readable version.
|
||||
- Focus on tasks with highest complexity scores (8-10) for detailed breakdown
|
||||
- Use analysis results to determine appropriate subtask allocation
|
||||
- Note that reports are automatically used by the expand command
|
||||
- Note that reports are automatically used by the `expand` tool/command
|
||||
|
||||
- **Task Breakdown Process**
|
||||
- For tasks with complexity analysis, use `node scripts/dev.js expand --id=<id>`
|
||||
- Otherwise use `node scripts/dev.js expand --id=<id> --subtasks=<number>`
|
||||
## Task Breakdown Process
|
||||
|
||||
- For tasks with complexity analysis, use `expand_task` / `task-master expand --id=<id>` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc))
|
||||
- Otherwise use `expand_task` / `task-master expand --id=<id> --num=<number>`
|
||||
- Add `--research` flag to leverage Perplexity AI for research-backed expansion
|
||||
- Use `--prompt="<context>"` to provide additional context when needed
|
||||
- Review and adjust generated subtasks as necessary
|
||||
- Use `--all` flag to expand multiple pending tasks at once
|
||||
- If subtasks need regeneration, clear them first with `clear-subtasks` command (See Command Reference below)
|
||||
- Use `--all` flag with `expand` or `expand_all` to expand multiple pending tasks at once
|
||||
- If subtasks need regeneration, clear them first with `clear_subtasks` / `task-master clear-subtasks` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)).
|
||||
|
||||
## Implementation Drift Handling
|
||||
|
||||
- **Implementation Drift Handling**
|
||||
- When implementation differs significantly from planned approach
|
||||
- When future tasks need modification due to current implementation choices
|
||||
- When new dependencies or requirements emerge
|
||||
- Call `node scripts/dev.js update --from=<futureTaskId> --prompt="<explanation>"` to update tasks.json
|
||||
- Use `update` / `task-master update --from=<futureTaskId> --prompt="<explanation>"` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to update multiple future tasks.
|
||||
- Use `update_task` / `task-master update-task --id=<taskId> --prompt="<explanation>"` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to update a single specific task.
|
||||
|
||||
## Task Status Management
|
||||
|
||||
- **Task Status Management**
|
||||
- Use 'pending' for tasks ready to be worked on
|
||||
- Use 'done' for completed and verified tasks
|
||||
- Use 'deferred' for postponed tasks
|
||||
- Add custom status values as needed for project-specific workflows
|
||||
|
||||
- **Task File Format Reference**
|
||||
```
|
||||
# Task ID: <id>
|
||||
# Title: <title>
|
||||
# Status: <status>
|
||||
# Dependencies: <comma-separated list of dependency IDs>
|
||||
# Priority: <priority>
|
||||
# Description: <brief description>
|
||||
# Details:
|
||||
<detailed implementation notes>
|
||||
## Task Structure Fields
|
||||
|
||||
# Test Strategy:
|
||||
<verification approach>
|
||||
```
|
||||
|
||||
- **Command Reference: parse-prd**
|
||||
- CLI Syntax: `task-master parse-prd --input=<prd-file.txt>`
|
||||
- Description: Parses a PRD document and generates a `tasks.json` file with structured tasks
|
||||
- Parameters:
|
||||
- `--input=<file>`: Path to the PRD text file (default: sample-prd.txt)
|
||||
- Example: `task-master parse-prd --input=requirements.txt`
|
||||
- Notes: Will overwrite existing tasks.json file. Use with caution.
|
||||
|
||||
- **Command Reference: update**
|
||||
- CLI Syntax: `task-master update --from=<id> --prompt="<prompt>"`
|
||||
- Description: Updates tasks with ID >= specified ID based on the provided prompt
|
||||
- Parameters:
|
||||
- `--from=<id>`: Task ID from which to start updating (required)
|
||||
- `--prompt="<text>"`: Explanation of changes or new context (required)
|
||||
- Example: `task-master update --from=4 --prompt="Now we are using Express instead of Fastify."`
|
||||
- Notes: Only updates tasks not marked as 'done'. Completed tasks remain unchanged.
|
||||
|
||||
- **Command Reference: update-task**
|
||||
- CLI Syntax: `task-master update-task --id=<id> --prompt="<prompt>"`
|
||||
- Description: Updates a single task by ID with new information
|
||||
- Parameters:
|
||||
- `--id=<id>`: ID of the task to update (required)
|
||||
- `--prompt="<text>"`: New information or context to update the task (required)
|
||||
- `--research`: Use Perplexity AI for research-backed updates
|
||||
- Example: `task-master update-task --id=5 --prompt="Use JWT for authentication instead of sessions."`
|
||||
- Notes: Only updates tasks not marked as 'done'. Preserves completed subtasks.
|
||||
|
||||
- **Command Reference: update-subtask**
|
||||
- CLI Syntax: `task-master update-subtask --id=<id> --prompt="<prompt>"`
|
||||
- Description: Appends additional information to a specific subtask without replacing existing content
|
||||
- Parameters:
|
||||
- `--id=<id>`: ID of the subtask to update in format "parentId.subtaskId" (required)
|
||||
- `--prompt="<text>"`: Information to add to the subtask (required)
|
||||
- `--research`: Use Perplexity AI for research-backed updates
|
||||
- Example: `task-master update-subtask --id=5.2 --prompt="Add details about API rate limiting."`
|
||||
- Notes:
|
||||
- Appends new information to subtask details with timestamp
|
||||
- Does not replace existing content, only adds to it
|
||||
- Uses XML-like tags to clearly mark added information
|
||||
- Will not update subtasks marked as 'done' or 'completed'
|
||||
|
||||
- **Command Reference: generate**
|
||||
- CLI Syntax: `task-master generate`
|
||||
- Description: Generates individual task files in tasks/ directory based on tasks.json
|
||||
- Parameters:
|
||||
- `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
|
||||
- `--output=<dir>, -o`: Output directory (default: 'tasks')
|
||||
- Example: `task-master generate`
|
||||
- Notes: Overwrites existing task files. Creates tasks/ directory if needed.
|
||||
|
||||
- **Command Reference: set-status**
|
||||
- CLI Syntax: `task-master set-status --id=<id> --status=<status>`
|
||||
- Description: Updates the status of a specific task in tasks.json
|
||||
- Parameters:
|
||||
- `--id=<id>`: ID of the task to update (required)
|
||||
- `--status=<status>`: New status value (required)
|
||||
- Example: `task-master set-status --id=3 --status=done`
|
||||
- Notes: Common values are 'done', 'pending', and 'deferred', but any string is accepted.
|
||||
|
||||
- **Command Reference: list**
|
||||
- CLI Syntax: `task-master list`
|
||||
- Description: Lists all tasks in tasks.json with IDs, titles, and status
|
||||
- Parameters:
|
||||
- `--status=<status>, -s`: Filter by status
|
||||
- `--with-subtasks`: Show subtasks for each task
|
||||
- `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
|
||||
- Example: `task-master list`
|
||||
- Notes: Provides quick overview of project progress. Use at start of sessions.
|
||||
|
||||
- **Command Reference: expand**
|
||||
- CLI Syntax: `task-master expand --id=<id> [--num=<number>] [--research] [--prompt="<context>"]`
|
||||
- Description: Expands a task with subtasks for detailed implementation
|
||||
- Parameters:
|
||||
- `--id=<id>`: ID of task to expand (required unless using --all)
|
||||
- `--all`: Expand all pending tasks, prioritized by complexity
|
||||
- `--num=<number>`: Number of subtasks to generate (default: from complexity report)
|
||||
- `--research`: Use Perplexity AI for research-backed generation
|
||||
- `--prompt="<text>"`: Additional context for subtask generation
|
||||
- `--force`: Regenerate subtasks even for tasks that already have them
|
||||
- Example: `task-master expand --id=3 --num=5 --research --prompt="Focus on security aspects"`
|
||||
- Notes: Uses complexity report recommendations if available.
|
||||
|
||||
- **Command Reference: analyze-complexity**
|
||||
- CLI Syntax: `task-master analyze-complexity [options]`
|
||||
- Description: Analyzes task complexity and generates expansion recommendations
|
||||
- Parameters:
|
||||
- `--output=<file>, -o`: Output file path (default: scripts/task-complexity-report.json)
|
||||
- `--model=<model>, -m`: Override LLM model to use
|
||||
- `--threshold=<number>, -t`: Minimum score for expansion recommendation (default: 5)
|
||||
- `--file=<path>, -f`: Use alternative tasks.json file
|
||||
- `--research, -r`: Use Perplexity AI for research-backed analysis
|
||||
- Example: `task-master analyze-complexity --research`
|
||||
- Notes: Report includes complexity scores, recommended subtasks, and tailored prompts.
|
||||
|
||||
- **Command Reference: clear-subtasks**
|
||||
- CLI Syntax: `task-master clear-subtasks --id=<id>`
|
||||
- Description: Removes subtasks from specified tasks to allow regeneration
|
||||
- Parameters:
|
||||
- `--id=<id>`: ID or comma-separated IDs of tasks to clear subtasks from
|
||||
- `--all`: Clear subtasks from all tasks
|
||||
- Examples:
|
||||
- `task-master clear-subtasks --id=3`
|
||||
- `task-master clear-subtasks --id=1,2,3`
|
||||
- `task-master clear-subtasks --all`
|
||||
- Notes:
|
||||
- Task files are automatically regenerated after clearing subtasks
|
||||
- Can be combined with expand command to immediately generate new subtasks
|
||||
- Works with both parent tasks and individual subtasks
|
||||
|
||||
- **Task Structure Fields**
|
||||
- **id**: Unique identifier for the task (Example: `1`)
|
||||
- **id**: Unique identifier for the task (Example: `1`, `1.1`)
|
||||
- **title**: Brief, descriptive title (Example: `"Initialize Repo"`)
|
||||
- **description**: Concise summary of what the task involves (Example: `"Create a new repository, set up initial structure."`)
|
||||
- **status**: Current state of the task (Example: `"pending"`, `"done"`, `"deferred"`)
|
||||
- **dependencies**: IDs of prerequisite tasks (Example: `[1, 2]`)
|
||||
- **dependencies**: IDs of prerequisite tasks (Example: `[1, 2.1]`)
|
||||
- Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending)
|
||||
- This helps quickly identify which prerequisite tasks are blocking work
|
||||
- **priority**: Importance level (Example: `"high"`, `"medium"`, `"low"`)
|
||||
- **details**: In-depth implementation instructions (Example: `"Use GitHub client ID/secret, handle callback, set session token."`)
|
||||
- **testStrategy**: Verification approach (Example: `"Deploy and call endpoint to confirm 'Hello World' response."`)
|
||||
- **subtasks**: List of smaller, more specific tasks (Example: `[{"id": 1, "title": "Configure OAuth", ...}]`)
|
||||
- Refer to [`tasks.mdc`](mdc:.cursor/rules/tasks.mdc) for more details on the task data structure.
|
||||
|
||||
- **Environment Variables Configuration**
|
||||
- **ANTHROPIC_API_KEY** (Required): Your Anthropic API key for Claude (Example: `ANTHROPIC_API_KEY=sk-ant-api03-...`)
|
||||
- **MODEL** (Default: `"claude-3-7-sonnet-20250219"`): Claude model to use (Example: `MODEL=claude-3-opus-20240229`)
|
||||
- **MAX_TOKENS** (Default: `"4000"`): Maximum tokens for responses (Example: `MAX_TOKENS=8000`)
|
||||
- **TEMPERATURE** (Default: `"0.7"`): Temperature for model responses (Example: `TEMPERATURE=0.5`)
|
||||
- **DEBUG** (Default: `"false"`): Enable debug logging (Example: `DEBUG=true`)
|
||||
- **LOG_LEVEL** (Default: `"info"`): Console output level (Example: `LOG_LEVEL=debug`)
|
||||
- **DEFAULT_SUBTASKS** (Default: `"3"`): Default subtask count (Example: `DEFAULT_SUBTASKS=5`)
|
||||
- **DEFAULT_PRIORITY** (Default: `"medium"`): Default priority (Example: `DEFAULT_PRIORITY=high`)
|
||||
- **PROJECT_NAME** (Default: `"MCP SaaS MVP"`): Project name in metadata (Example: `PROJECT_NAME=My Awesome Project`)
|
||||
- **PROJECT_VERSION** (Default: `"1.0.0"`): Version in metadata (Example: `PROJECT_VERSION=2.1.0`)
|
||||
- **PERPLEXITY_API_KEY**: For research-backed features (Example: `PERPLEXITY_API_KEY=pplx-...`)
|
||||
- **PERPLEXITY_MODEL** (Default: `"sonar-medium-online"`): Perplexity model (Example: `PERPLEXITY_MODEL=sonar-large-online`)
|
||||
## Environment Variables Configuration
|
||||
|
||||
- **Determining the Next Task**
|
||||
- Run `task-master next` to show the next task to work on
|
||||
- The next command identifies tasks with all dependencies satisfied
|
||||
- Task Master behavior is configured via environment variables:
|
||||
- **ANTHROPIC_API_KEY** (Required): Your Anthropic API key for Claude.
|
||||
- **MODEL**: Claude model to use (e.g., `claude-3-opus-20240229`).
|
||||
- **MAX_TOKENS**: Maximum tokens for AI responses.
|
||||
- **TEMPERATURE**: Temperature for AI model responses.
|
||||
- **DEBUG**: Enable debug logging (`true`/`false`).
|
||||
- **LOG_LEVEL**: Console output level (`debug`, `info`, `warn`, `error`).
|
||||
- **DEFAULT_SUBTASKS**: Default number of subtasks for `expand`.
|
||||
- **DEFAULT_PRIORITY**: Default priority for new tasks.
|
||||
- **PROJECT_NAME**: Project name used in metadata.
|
||||
- **PROJECT_VERSION**: Project version used in metadata.
|
||||
- **PERPLEXITY_API_KEY**: API key for Perplexity AI (for `--research` flags).
|
||||
- **PERPLEXITY_MODEL**: Perplexity model to use (e.g., `sonar-medium-online`).
|
||||
- See [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc) for default values and examples.
|
||||
|
||||
## Determining the Next Task
|
||||
|
||||
- Run `next_task` / `task-master next` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to show the next task to work on
|
||||
- The command identifies tasks with all dependencies satisfied
|
||||
- Tasks are prioritized by priority level, dependency count, and ID
|
||||
- The command shows comprehensive task information including:
|
||||
- Basic task details and description
|
||||
@@ -229,8 +131,9 @@ alwaysApply: true
|
||||
- Ensures tasks are completed in the appropriate sequence
|
||||
- Provides ready-to-use commands for common task actions
|
||||
|
||||
- **Viewing Specific Task Details**
|
||||
- Run `task-master show <id>` or `task-master show --id=<id>` to view a specific task
|
||||
## Viewing Specific Task Details
|
||||
|
||||
- Run `get_task` / `task-master show <id>` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to view a specific task
|
||||
- Use dot notation for subtasks: `task-master show 1.2` (shows subtask 2 of task 1)
|
||||
- Displays comprehensive information similar to the next command, but for a specific task
|
||||
- For parent tasks, shows all subtasks and their current status
|
||||
@@ -238,108 +141,22 @@ alwaysApply: true
|
||||
- Provides contextual suggested actions appropriate for the specific task
|
||||
- Useful for examining task details before implementation or checking status
|
||||
|
||||
- **Managing Task Dependencies**
|
||||
- Use `task-master add-dependency --id=<id> --depends-on=<id>` to add a dependency
|
||||
- Use `task-master remove-dependency --id=<id> --depends-on=<id>` to remove a dependency
|
||||
## Managing Task Dependencies
|
||||
|
||||
- Use `add_dependency` / `task-master add-dependency --id=<id> --depends-on=<id>` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to add a dependency
|
||||
- Use `remove_dependency` / `task-master remove-dependency --id=<id> --depends-on=<id>` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to remove a dependency
|
||||
- The system prevents circular dependencies and duplicate dependency entries
|
||||
- Dependencies are checked for existence before being added or removed
|
||||
- Task files are automatically regenerated after dependency changes
|
||||
- Dependencies are visualized with status indicators in task listings and files
|
||||
|
||||
- **Command Reference: add-dependency**
|
||||
- CLI Syntax: `task-master add-dependency --id=<id> --depends-on=<id>`
|
||||
- Description: Adds a dependency relationship between two tasks
|
||||
- Parameters:
|
||||
- `--id=<id>`: ID of task that will depend on another task (required)
|
||||
- `--depends-on=<id>`: ID of task that will become a dependency (required)
|
||||
- Example: `task-master add-dependency --id=22 --depends-on=21`
|
||||
- Notes: Prevents circular dependencies and duplicates; updates task files automatically
|
||||
## Code Analysis & Refactoring Techniques
|
||||
|
||||
- **Command Reference: remove-dependency**
|
||||
- CLI Syntax: `task-master remove-dependency --id=<id> --depends-on=<id>`
|
||||
- Description: Removes a dependency relationship between two tasks
|
||||
- Parameters:
|
||||
- `--id=<id>`: ID of task to remove dependency from (required)
|
||||
- `--depends-on=<id>`: ID of task to remove as a dependency (required)
|
||||
- Example: `task-master remove-dependency --id=22 --depends-on=21`
|
||||
- Notes: Checks if dependency actually exists; updates task files automatically
|
||||
- **Top-Level Function Search**:
|
||||
- Useful for understanding module structure or planning refactors.
|
||||
- Use grep/ripgrep to find exported functions/constants:
|
||||
`rg "export (async function|function|const) \w+"` or similar patterns.
|
||||
- Can help compare functions between files during migrations or identify potential naming conflicts.
|
||||
|
||||
- **Command Reference: validate-dependencies**
|
||||
- CLI Syntax: `task-master validate-dependencies [options]`
|
||||
- Description: Checks for and identifies invalid dependencies in tasks.json and task files
|
||||
- Parameters:
|
||||
- `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
|
||||
- Example: `task-master validate-dependencies`
|
||||
- Notes:
|
||||
- Reports all non-existent dependencies and self-dependencies without modifying files
|
||||
- Provides detailed statistics on task dependency state
|
||||
- Use before fix-dependencies to audit your task structure
|
||||
|
||||
- **Command Reference: fix-dependencies**
|
||||
- CLI Syntax: `task-master fix-dependencies [options]`
|
||||
- Description: Finds and fixes all invalid dependencies in tasks.json and task files
|
||||
- Parameters:
|
||||
- `--file=<path>, -f`: Use alternative tasks.json file (default: 'tasks/tasks.json')
|
||||
- Example: `task-master fix-dependencies`
|
||||
- Notes:
|
||||
- Removes references to non-existent tasks and subtasks
|
||||
- Eliminates self-dependencies (tasks depending on themselves)
|
||||
- Regenerates task files with corrected dependencies
|
||||
- Provides detailed report of all fixes made
|
||||
|
||||
- **Command Reference: complexity-report**
|
||||
- CLI Syntax: `task-master complexity-report [options]`
|
||||
- Description: Displays the task complexity analysis report in a formatted, easy-to-read way
|
||||
- Parameters:
|
||||
- `--file=<path>, -f`: Path to the complexity report file (default: 'scripts/task-complexity-report.json')
|
||||
- Example: `task-master complexity-report`
|
||||
- Notes:
|
||||
- Shows tasks organized by complexity score with recommended actions
|
||||
- Provides complexity distribution statistics
|
||||
- Displays ready-to-use expansion commands for complex tasks
|
||||
- If no report exists, offers to generate one interactively
|
||||
|
||||
- **Command Reference: add-task**
|
||||
- CLI Syntax: `task-master add-task [options]`
|
||||
- Description: Add a new task to tasks.json using AI
|
||||
- Parameters:
|
||||
- `--file=<path>, -f`: Path to the tasks file (default: 'tasks/tasks.json')
|
||||
- `--prompt=<text>, -p`: Description of the task to add (required)
|
||||
- `--dependencies=<ids>, -d`: Comma-separated list of task IDs this task depends on
|
||||
- `--priority=<priority>`: Task priority (high, medium, low) (default: 'medium')
|
||||
- Example: `task-master add-task --prompt="Create user authentication using Auth0"`
|
||||
- Notes: Uses AI to convert description into structured task with appropriate details
|
||||
|
||||
- **Command Reference: init**
|
||||
- CLI Syntax: `task-master init`
|
||||
- Description: Initialize a new project with Task Master structure
|
||||
- Parameters: None
|
||||
- Example: `task-master init`
|
||||
- Notes:
|
||||
- Creates initial project structure with required files
|
||||
- Prompts for project settings if not provided
|
||||
- Merges with existing files when appropriate
|
||||
- Can be used to bootstrap a new Task Master project quickly
|
||||
|
||||
- **Code Analysis & Refactoring Techniques**
|
||||
- **Top-Level Function Search**
|
||||
- Use grep pattern matching to find all exported functions across the codebase
|
||||
- Command: `grep -E "export (function|const) \w+|function \w+\(|const \w+ = \(|module\.exports" --include="*.js" -r ./`
|
||||
- Benefits:
|
||||
- Quickly identify all public API functions without reading implementation details
|
||||
- Compare functions between files during refactoring (e.g., monolithic to modular structure)
|
||||
- Verify all expected functions exist in refactored modules
|
||||
- Identify duplicate functionality or naming conflicts
|
||||
- Usage examples:
|
||||
- When migrating from `scripts/dev.js` to modular structure: `grep -E "function \w+\(" scripts/dev.js`
|
||||
- Check function exports in a directory: `grep -E "export (function|const)" scripts/modules/`
|
||||
- Find potential naming conflicts: `grep -E "function (get|set|create|update)\w+\(" -r ./`
|
||||
- Variations:
|
||||
- Add `-n` flag to include line numbers
|
||||
- Add `--include="*.ts"` to filter by file extension
|
||||
- Use with `| sort` to alphabetize results
|
||||
- Integration with refactoring workflow:
|
||||
- Start by mapping all functions in the source file
|
||||
- Create target module files based on function grouping
|
||||
- Verify all functions were properly migrated
|
||||
- Check for any unintentional duplications or omissions
|
||||
---
|
||||
*This workflow provides a general guideline. Adapt it based on your specific project needs and team practices.*
|
||||
26
.cursor/rules/glossary.mdc
Normal file
26
.cursor/rules/glossary.mdc
Normal file
@@ -0,0 +1,26 @@
|
||||
---
|
||||
description: Glossary of other Cursor rules
|
||||
globs: **/*
|
||||
alwaysApply: true
|
||||
---
|
||||
|
||||
# Glossary of Task Master Cursor Rules
|
||||
|
||||
This file provides a quick reference to the purpose of each rule file located in the `.cursor/rules` directory.
|
||||
|
||||
- **[`architecture.mdc`](mdc:.cursor/rules/architecture.mdc)**: Describes the high-level architecture of the Task Master CLI application.
|
||||
- **[`changeset.mdc`](mdc:.cursor/rules/changeset.mdc)**: Guidelines for using Changesets (npm run changeset) to manage versioning and changelogs.
|
||||
- **[`commands.mdc`](mdc:.cursor/rules/commands.mdc)**: Guidelines for implementing CLI commands using Commander.js.
|
||||
- **[`cursor_rules.mdc`](mdc:.cursor/rules/cursor_rules.mdc)**: Guidelines for creating and maintaining Cursor rules to ensure consistency and effectiveness.
|
||||
- **[`dependencies.mdc`](mdc:.cursor/rules/dependencies.mdc)**: Guidelines for managing task dependencies and relationships.
|
||||
- **[`dev_workflow.mdc`](mdc:.cursor/rules/dev_workflow.mdc)**: Guide for using Task Master to manage task-driven development workflows.
|
||||
- **[`glossary.mdc`](mdc:.cursor/rules/glossary.mdc)**: This file; provides a glossary of other Cursor rules.
|
||||
- **[`mcp.mdc`](mdc:.cursor/rules/mcp.mdc)**: Guidelines for implementing and interacting with the Task Master MCP Server.
|
||||
- **[`new_features.mdc`](mdc:.cursor/rules/new_features.mdc)**: Guidelines for integrating new features into the Task Master CLI.
|
||||
- **[`self_improve.mdc`](mdc:.cursor/rules/self_improve.mdc)**: Guidelines for continuously improving Cursor rules based on emerging code patterns and best practices.
|
||||
- **[`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)**: Comprehensive reference for Taskmaster MCP tools and CLI commands.
|
||||
- **[`tasks.mdc`](mdc:.cursor/rules/tasks.mdc)**: Guidelines for implementing task management operations.
|
||||
- **[`tests.mdc`](mdc:.cursor/rules/tests.mdc)**: Guidelines for implementing and maintaining tests for Task Master CLI.
|
||||
- **[`ui.mdc`](mdc:.cursor/rules/ui.mdc)**: Guidelines for implementing and maintaining user interface components.
|
||||
- **[`utilities.mdc`](mdc:.cursor/rules/utilities.mdc)**: Guidelines for implementing utility functions.
|
||||
|
||||
@@ -12,76 +12,541 @@ This document outlines the architecture and implementation patterns for the Task
|
||||
|
||||
The MCP server acts as a bridge between external tools (like Cursor) and the core Task Master CLI logic. It leverages FastMCP for the server framework.
|
||||
|
||||
- **Flow**: `External Tool (Cursor)` <-> `FastMCP Server` <-> `MCP Tools` (`mcp-server/src/tools/*.js`) <-> `Core Logic Wrappers` (`mcp-server/src/core/task-master-core.js`) <-> `Core Modules` (`scripts/modules/*.js`)
|
||||
- **Flow**: `External Tool (Cursor)` <-> `FastMCP Server` <-> `MCP Tools` (`mcp-server/src/tools/*.js`) <-> `Core Logic Wrappers` (`mcp-server/src/core/direct-functions/*.js`, exported via `task-master-core.js`) <-> `Core Modules` (`scripts/modules/*.js`)
|
||||
- **Goal**: Provide a performant and reliable way for external tools to interact with Task Master functionality without directly invoking the CLI for every operation.
|
||||
|
||||
## Direct Function Implementation Best Practices
|
||||
|
||||
When implementing a new direct function in `mcp-server/src/core/direct-functions/`, follow these critical guidelines:
|
||||
|
||||
1. **Verify Function Dependencies**:
|
||||
- ✅ **DO**: Check that all helper functions your direct function needs are properly exported from their source modules
|
||||
- ✅ **DO**: Import these dependencies explicitly at the top of your file
|
||||
- ❌ **DON'T**: Assume helper functions like `findTaskById` or `taskExists` are automatically available
|
||||
- **Example**:
|
||||
```javascript
|
||||
// At top of direct-function file
|
||||
import { removeTask, findTaskById, taskExists } from '../../../../scripts/modules/task-manager.js';
|
||||
```
|
||||
|
||||
2. **Parameter Verification and Completeness**:
|
||||
- ✅ **DO**: Verify the signature of core functions you're calling and ensure all required parameters are provided
|
||||
- ✅ **DO**: Pass explicit values for required parameters rather than relying on defaults
|
||||
- ✅ **DO**: Double-check parameter order against function definition
|
||||
- ❌ **DON'T**: Omit parameters assuming they have default values
|
||||
- **Example**:
|
||||
```javascript
|
||||
// Correct parameter handling in direct function
|
||||
async function generateTaskFilesDirect(args, log) {
|
||||
const tasksPath = findTasksJsonPath(args, log);
|
||||
const outputDir = args.output || path.dirname(tasksPath);
|
||||
|
||||
try {
|
||||
// Pass all required parameters
|
||||
const result = await generateTaskFiles(tasksPath, outputDir);
|
||||
return { success: true, data: result, fromCache: false };
|
||||
} catch (error) {
|
||||
// Error handling...
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
3. **Consistent File Path Handling**:
|
||||
- ✅ **DO**: Use `path.join()` instead of string concatenation for file paths
|
||||
- ✅ **DO**: Follow established file naming conventions (`task_001.txt` not `1.md`)
|
||||
- ✅ **DO**: Use `path.dirname()` and other path utilities for manipulating paths
|
||||
- ✅ **DO**: When paths relate to task files, follow the standard format: `task_${id.toString().padStart(3, '0')}.txt`
|
||||
- ❌ **DON'T**: Create custom file path handling logic that diverges from established patterns
|
||||
- **Example**:
|
||||
```javascript
|
||||
// Correct file path handling
|
||||
const taskFilePath = path.join(
|
||||
path.dirname(tasksPath),
|
||||
`task_${taskId.toString().padStart(3, '0')}.txt`
|
||||
);
|
||||
```
|
||||
|
||||
4. **Comprehensive Error Handling**:
|
||||
- ✅ **DO**: Wrap core function calls in try/catch blocks
|
||||
- ✅ **DO**: Log errors with appropriate severity and context
|
||||
- ✅ **DO**: Return standardized error objects with code and message
|
||||
- ✅ **DO**: Handle file system errors separately from function-specific errors
|
||||
- **Example**:
|
||||
```javascript
|
||||
try {
|
||||
// Core function call
|
||||
} catch (error) {
|
||||
log.error(`Failed to execute command: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: error.code || 'DIRECT_FUNCTION_ERROR',
|
||||
message: error.message,
|
||||
details: error.stack
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
5. **Silent Mode Implementation**:
|
||||
- ✅ **DO**: Import silent mode utilities at the top of your file
|
||||
```javascript
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
```
|
||||
- ✅ **DO**: Wrap core function calls with silent mode control
|
||||
```javascript
|
||||
// Enable silent mode before the core function call
|
||||
enableSilentMode();
|
||||
|
||||
// Execute core function
|
||||
const result = await coreFunction(param1, param2);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
```
|
||||
- ✅ **DO**: Add proper error handling to ensure silent mode is disabled
|
||||
```javascript
|
||||
try {
|
||||
enableSilentMode();
|
||||
// Core function execution
|
||||
const result = await coreFunction(param1, param2);
|
||||
disableSilentMode();
|
||||
return { success: true, data: result };
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
log.error(`Error in function: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'ERROR_CODE', message: error.message }
|
||||
};
|
||||
}
|
||||
```
|
||||
- ❌ **DON'T**: Forget to disable silent mode when errors occur
|
||||
- ❌ **DON'T**: Leave silent mode enabled outside a direct function's scope
|
||||
- ❌ **DON'T**: Skip silent mode for core function calls that generate logs
|
||||
|
||||
## Tool Definition and Execution
|
||||
|
||||
### Tool Structure
|
||||
|
||||
MCP tools must follow a specific structure to properly interact with the FastMCP framework:
|
||||
|
||||
```javascript
|
||||
server.addTool({
|
||||
name: "tool_name", // Use snake_case for tool names
|
||||
description: "Description of what the tool does",
|
||||
parameters: z.object({
|
||||
// Define parameters using Zod
|
||||
param1: z.string().describe("Parameter description"),
|
||||
param2: z.number().optional().describe("Optional parameter description"),
|
||||
// IMPORTANT: For file operations, always include these optional parameters
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z.string().optional().describe("Root directory of the project (typically derived from session)")
|
||||
}),
|
||||
|
||||
// The execute function is the core of the tool implementation
|
||||
execute: async (args, context) => {
|
||||
// Implementation goes here
|
||||
// Return response in the appropriate format
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Execute Function Signature
|
||||
|
||||
The `execute` function receives validated arguments and the FastMCP context:
|
||||
|
||||
```javascript
|
||||
execute: async (args, context) => {
|
||||
// Tool implementation
|
||||
}
|
||||
```
|
||||
|
||||
- **args**: The first parameter contains all the validated parameters defined in the tool's schema.
|
||||
- **context**: The second parameter is an object containing `{ log, reportProgress, session }` provided by FastMCP.
|
||||
- ✅ **DO**: `execute: async (args, { log, reportProgress, session }) => {}`
|
||||
|
||||
### Standard Tool Execution Pattern
|
||||
|
||||
The `execute` method within each MCP tool (in `mcp-server/src/tools/*.js`) should follow this standard pattern:
|
||||
|
||||
1. **Log Entry**: Log the start of the tool execution with relevant arguments.
|
||||
2. **Get Project Root**: Use the `getProjectRootFromSession(session, log)` utility (from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js)) to extract the project root path from the client session. Fall back to `args.projectRoot` if the session doesn't provide a root.
|
||||
3. **Call Direct Function**: Invoke the corresponding `*Direct` function wrapper (e.g., `listTasksDirect` from [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js)), passing an updated `args` object that includes the resolved `projectRoot`, along with the `log` object: `await someDirectFunction({ ...args, projectRoot: resolvedRootFolder }, log);`
|
||||
4. **Handle Result**: Receive the result object (`{ success, data/error, fromCache }`) from the `*Direct` function.
|
||||
5. **Format Response**: Pass this result object to the `handleApiResult` utility (from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js)) for standardized MCP response formatting and error handling.
|
||||
6. **Return**: Return the formatted response object provided by `handleApiResult`.
|
||||
|
||||
```javascript
|
||||
// Example execute method structure
|
||||
import { getProjectRootFromSession, handleApiResult, createErrorResponse } from './utils.js';
|
||||
import { someDirectFunction } from '../core/task-master-core.js';
|
||||
|
||||
// ... inside server.addTool({...})
|
||||
execute: async (args, { log, reportProgress, session }) => {
|
||||
try {
|
||||
log.info(`Starting tool execution with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// 1. Get Project Root
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
if (!rootFolder && args.projectRoot) { // Fallback if needed
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
// 2. Call Direct Function (passing resolved root)
|
||||
const result = await someDirectFunction({
|
||||
...args,
|
||||
projectRoot: rootFolder // Ensure projectRoot is explicitly passed
|
||||
}, log);
|
||||
|
||||
// 3. Handle and Format Response
|
||||
return handleApiResult(result, log);
|
||||
|
||||
} catch (error) {
|
||||
log.error(`Error during tool execution: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Using AsyncOperationManager for Background Tasks
|
||||
|
||||
For tools that execute long-running operations, use the AsyncOperationManager to run them in the background:
|
||||
|
||||
```javascript
|
||||
import { asyncOperationManager } from '../core/utils/async-manager.js';
|
||||
import { getProjectRootFromSession, createContentResponse, createErrorResponse } from './utils.js';
|
||||
import { someIntensiveDirect } from '../core/task-master-core.js';
|
||||
|
||||
// ... inside server.addTool({...})
|
||||
execute: async (args, { log, reportProgress, session }) => {
|
||||
try {
|
||||
log.info(`Starting background operation with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// 1. Get Project Root
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
// 2. Add operation to the async manager
|
||||
const operationId = asyncOperationManager.addOperation(
|
||||
someIntensiveDirect, // The direct function to execute
|
||||
{ ...args, projectRoot: rootFolder }, // Args to pass
|
||||
{ log, reportProgress, session } // Context to preserve
|
||||
);
|
||||
|
||||
// 3. Return immediate response with operation ID
|
||||
return createContentResponse({
|
||||
message: "Operation started successfully",
|
||||
operationId,
|
||||
status: "pending"
|
||||
});
|
||||
} catch (error) {
|
||||
log.error(`Error starting background operation: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Clients should then use the `get_operation_status` tool to check on operation progress:
|
||||
|
||||
```javascript
|
||||
// In get-operation-status.js
|
||||
import { asyncOperationManager } from '../core/utils/async-manager.js';
|
||||
import { createContentResponse, createErrorResponse } from './utils.js';
|
||||
|
||||
// ... inside server.addTool({...})
|
||||
execute: async (args, { log }) => {
|
||||
try {
|
||||
const { operationId } = args;
|
||||
log.info(`Checking status of operation: ${operationId}`);
|
||||
|
||||
const status = asyncOperationManager.getStatus(operationId);
|
||||
|
||||
if (status.status === 'not_found') {
|
||||
return createErrorResponse(status.error.message);
|
||||
}
|
||||
|
||||
return createContentResponse({
|
||||
...status,
|
||||
message: `Operation status: ${status.status}`
|
||||
});
|
||||
} catch (error) {
|
||||
log.error(`Error checking operation status: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Project Initialization Tool
|
||||
|
||||
The `initialize_project` tool allows integrated clients like Cursor to set up a new Task Master project:
|
||||
|
||||
```javascript
|
||||
// In initialize-project.js
|
||||
import { z } from "zod";
|
||||
import { initializeProjectDirect } from "../core/task-master-core.js";
|
||||
import { handleApiResult, createErrorResponse } from "./utils.js";
|
||||
|
||||
export function registerInitializeProjectTool(server) {
|
||||
server.addTool({
|
||||
name: "initialize_project",
|
||||
description: "Initialize a new Task Master project",
|
||||
parameters: z.object({
|
||||
projectName: z.string().optional().describe("The name for the new project"),
|
||||
projectDescription: z.string().optional().describe("A brief description"),
|
||||
projectVersion: z.string().optional().describe("Initial version (e.g., '0.1.0')"),
|
||||
authorName: z.string().optional().describe("The author's name"),
|
||||
skipInstall: z.boolean().optional().describe("Skip installing dependencies"),
|
||||
addAliases: z.boolean().optional().describe("Add shell aliases"),
|
||||
yes: z.boolean().optional().describe("Skip prompts and use defaults")
|
||||
}),
|
||||
execute: async (args, { log, reportProgress }) => {
|
||||
try {
|
||||
// Since we're initializing, we don't need project root
|
||||
const result = await initializeProjectDirect(args, log);
|
||||
return handleApiResult(result, log, 'Error initializing project');
|
||||
} catch (error) {
|
||||
log.error(`Error in initialize_project: ${error.message}`);
|
||||
return createErrorResponse(`Failed to initialize project: ${error.message}`);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
### Logging Convention
|
||||
|
||||
The `log` object (destructured from `context`) provides standardized logging methods. Use it within both the `execute` method and the `*Direct` functions.
|
||||
|
||||
```javascript
|
||||
// Proper logging usage
|
||||
log.info(`Starting ${toolName} with parameters: ${JSON.stringify(sanitizedArgs)}`);
|
||||
log.debug("Detailed operation info", { data });
|
||||
log.warn("Potential issue detected");
|
||||
log.error(`Error occurred: ${error.message}`, { stack: error.stack });
|
||||
```
|
||||
|
||||
### Progress Reporting Convention
|
||||
|
||||
Use `reportProgress` (destructured from `context`) for long-running operations. It expects an object `{ progress: number, total?: number }`.
|
||||
|
||||
```javascript
|
||||
await reportProgress({ progress: 0 }); // Start
|
||||
// ... work ...
|
||||
await reportProgress({ progress: 50 }); // Intermediate (total optional)
|
||||
// ... more work ...
|
||||
await reportProgress({ progress: 100 }); // Complete
|
||||
```
|
||||
|
||||
### Session Usage Convention
|
||||
|
||||
The `session` object (destructured from `context`) contains authenticated session data and client information.
|
||||
|
||||
- **Authentication**: Access user-specific data (`session.userId`, etc.) if authentication is implemented.
|
||||
- **Project Root**: The primary use in Task Master is accessing `session.roots` to determine the client's project root directory via the `getProjectRootFromSession` utility (from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js)). See the Standard Tool Execution Pattern above.
|
||||
- **Capabilities**: Can be used to check client capabilities (`session.clientCapabilities`).
|
||||
|
||||
## Direct Function Wrappers (`*Direct`)
|
||||
|
||||
These functions, located in `mcp-server/src/core/direct-functions/`, form the core logic execution layer for MCP tools.
|
||||
|
||||
- **Purpose**: Bridge MCP tools and core Task Master modules (`scripts/modules/*`).
|
||||
- **Responsibilities**:
|
||||
- Receive `args` (including the `projectRoot` determined by the tool) and `log` object.
|
||||
- **Find `tasks.json`**: Use `findTasksJsonPath(args, log)` from [`core/utils/path-utils.js`](mdc:mcp-server/src/core/utils/path-utils.js). This function prioritizes the provided `args.projectRoot`.
|
||||
- Validate arguments specific to the core logic.
|
||||
- **Implement Silent Mode**: Import and use `enableSilentMode` and `disableSilentMode` around core function calls.
|
||||
- **Implement Caching**: Use `getCachedOrExecute` from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js) for read operations.
|
||||
- Call the underlying function from the core Task Master modules.
|
||||
- Handle errors gracefully.
|
||||
- Return a standardized result object: `{ success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }`.
|
||||
|
||||
## Key Principles
|
||||
|
||||
- **Prefer Direct Function Calls**: For optimal performance and error handling, MCP tools should utilize direct function wrappers defined in [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js). These wrappers call the underlying logic from the core modules (e.g., [`task-manager.js`](mdc:scripts/modules/task-manager.js)).
|
||||
- **Use `executeMCPToolAction`**: This utility function in [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js) is the standard wrapper for executing the main logic within an MCP tool's `execute` function. It handles common boilerplate like logging, argument processing, calling the core action (`*Direct` function), and formatting the response.
|
||||
- **CLI Execution as Fallback**: The `executeTaskMasterCommand` utility in [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js) allows executing commands via the CLI (`task-master ...`). This should **only** be used as a fallback if a direct function wrapper is not yet implemented or if a specific command intrinsically requires CLI execution.
|
||||
- **Centralized Utilities** (See also: [`utilities.mdc`](mdc:.cursor/rules/utilities.mdc)):
|
||||
- Use `findTasksJsonPath` (in [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js)) within direct function wrappers to locate the `tasks.json` file consistently.
|
||||
- **Leverage MCP Utilities**: The file [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js) contains essential helpers for MCP tool implementation:
|
||||
- `getProjectRoot`: Normalizes project paths (used internally by other utils).
|
||||
- `handleApiResult`: Standardizes handling results from direct function calls (success/error).
|
||||
- `createContentResponse`/`createErrorResponse`: Formats successful/error MCP responses.
|
||||
- `processMCPResponseData`: Filters/cleans data for MCP responses (e.g., removing `details`, `testStrategy`). This is the default processor used by `executeMCPToolAction`.
|
||||
- `executeMCPToolAction`: The primary wrapper function for tool execution logic.
|
||||
- `executeTaskMasterCommand`: Fallback for executing CLI commands.
|
||||
- **Caching**: To improve performance for frequently called read operations (like `listTasks`), a caching layer using `lru-cache` is implemented.
|
||||
- Caching logic should be added *inside* the direct function wrappers in [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js) using the `getCachedOrExecute` utility from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js).
|
||||
- Generate unique cache keys based on function arguments that define a distinct call.
|
||||
- Responses will include a `fromCache` flag.
|
||||
- Cache statistics can be monitored using the `cacheStats` MCP tool (implemented via `getCacheStatsDirect`).
|
||||
- **Prefer Direct Function Calls**: MCP tools should always call `*Direct` wrappers instead of `executeTaskMasterCommand`.
|
||||
- **Standardized Execution Flow**: Follow the pattern: MCP Tool -> `getProjectRootFromSession` -> `*Direct` Function -> Core Logic.
|
||||
- **Path Resolution via Direct Functions**: The `*Direct` function is responsible for finding the exact `tasks.json` path using `findTasksJsonPath`, relying on the `projectRoot` passed in `args`.
|
||||
- **Silent Mode in Direct Functions**: Wrap all core function calls with `enableSilentMode()` and `disableSilentMode()` to prevent logs from interfering with JSON responses.
|
||||
- **Async Processing for Intensive Operations**: Use AsyncOperationManager for CPU-intensive or long-running operations.
|
||||
- **Project Initialization**: Use the initialize_project tool for setting up new projects in integrated environments.
|
||||
- **Centralized Utilities**: Use helpers from `mcp-server/src/tools/utils.js` (like `handleApiResult`, `getProjectRootFromSession`, `getCachedOrExecute`) and `mcp-server/src/core/utils/path-utils.js` (`findTasksJsonPath`). See [`utilities.mdc`](mdc:.cursor/rules/utilities.mdc).
|
||||
- **Caching in Direct Functions**: Caching logic resides *within* the `*Direct` functions using `getCachedOrExecute`.
|
||||
|
||||
## Resources and Resource Templates
|
||||
|
||||
Resources provide LLMs with static or dynamic data without executing tools.
|
||||
|
||||
- **Implementation**: Use `@mcp.resource()` decorator pattern or `server.addResource`/`server.addResourceTemplate` in `mcp-server/src/core/resources/`.
|
||||
- **Registration**: Register resources during server initialization in [`mcp-server/src/index.js`](mdc:mcp-server/src/index.js).
|
||||
- **Best Practices**: Organize resources, validate parameters, use consistent URIs, handle errors. See [`fastmcp-core.txt`](docs/fastmcp-core.txt) for underlying SDK details.
|
||||
|
||||
*(Self-correction: Removed detailed Resource implementation examples as they were less relevant to the current user focus on tool execution flow and project roots. Kept the overview.)*
|
||||
|
||||
## Implementing MCP Support for a Command
|
||||
|
||||
Follow these steps to add MCP support for an existing Task Master command (see [`new_features.mdc`](mdc:.cursor/rules/new_features.mdc) for more detail):
|
||||
|
||||
1. **Ensure Core Logic Exists**: Verify the core functionality is implemented and exported from the relevant module in `scripts/modules/`.
|
||||
2. **Create Direct Wrapper**: In [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js):
|
||||
- Import the core function.
|
||||
- Import `getCachedOrExecute` from `../tools/utils.js`.
|
||||
- Create an `async function yourCommandDirect(args, log)` wrapper.
|
||||
- Inside the wrapper:
|
||||
- Determine arguments needed for both the core logic and the cache key (e.g., `tasksPath`, filters). Use `findTasksJsonPath(args, log)` if needed.
|
||||
- **Generate a unique `cacheKey`** based on the arguments that define a distinct operation (e.g., `\`yourCommand:${tasksPath}:${filter}\``).
|
||||
- **Define the `coreActionFn`**: An `async` function that contains the actual call to the imported core logic function, handling its specific errors and returning `{ success: true/false, data/error }`.
|
||||
- **Call `getCachedOrExecute`**:
|
||||
```javascript
|
||||
const result = await getCachedOrExecute({
|
||||
cacheKey,
|
||||
actionFn: coreActionFn, // The function wrapping the core logic call
|
||||
log
|
||||
});
|
||||
return result; // Returns { success, data/error, fromCache }
|
||||
```
|
||||
- Export the wrapper function and add it to the `directFunctions` map.
|
||||
3. **Create MCP Tool**: In `mcp-server/src/tools/`:
|
||||
- Create a new file (e.g., `yourCommand.js`).
|
||||
- Import `z` for parameter schema definition.
|
||||
- Import `executeMCPToolAction` from [`./utils.js`](mdc:mcp-server/src/tools/utils.js).
|
||||
- Import the `yourCommandDirect` wrapper function from `../core/task-master-core.js`.
|
||||
- Implement `registerYourCommandTool(server)`:
|
||||
- Call `server.addTool`.
|
||||
- Define `name`, `description`, and `parameters` using `zod`. Include `projectRoot` and `file` as optional parameters if relevant.
|
||||
- Define the `async execute(args, log)` function.
|
||||
- Inside `execute`, call `executeMCPToolAction`:
|
||||
```javascript
|
||||
return executeMCPToolAction({
|
||||
actionFn: yourCommandDirect, // The direct function wrapper
|
||||
args, // Arguments from the tool call
|
||||
log, // MCP logger instance
|
||||
actionName: 'Your Command Description', // For logging
|
||||
// processResult: customProcessor // Optional: if default filtering isn't enough
|
||||
});
|
||||
```
|
||||
4. **Register Tool**: Import and call `registerYourCommandTool` in [`mcp-server/src/tools/index.js`](mdc:mcp-server/src/tools/index.js).
|
||||
5. **Update `mcp.json`**: Add the new tool definition to the `tools` array in `.cursor/mcp.json`.
|
||||
|
||||
2. **Create Direct Function File in `mcp-server/src/core/direct-functions/`**:
|
||||
- Create a new file (e.g., `your-command.js`) using **kebab-case** naming.
|
||||
- Import necessary core functions, **`findTasksJsonPath` from `../utils/path-utils.js`**, and **silent mode utilities**.
|
||||
- Implement `async function yourCommandDirect(args, log)` using **camelCase** with `Direct` suffix:
|
||||
- **Path Resolution**: Obtain the tasks file path using `const tasksPath = findTasksJsonPath(args, log);`. This handles project root detection automatically based on `args.projectRoot`.
|
||||
- Parse other `args` and perform necessary validation.
|
||||
- **Implement Silent Mode**: Wrap core function calls with enableSilentMode/disableSilentMode.
|
||||
- **If Caching**: Implement caching using `getCachedOrExecute` from `../../tools/utils.js`.
|
||||
- **If Not Caching**: Directly call the core logic function within a try/catch block.
|
||||
- Format the return as `{ success: true/false, data/error, fromCache: boolean }`.
|
||||
- Export the wrapper function.
|
||||
|
||||
3. **Update `task-master-core.js` with Import/Export**: Import and re-export your `*Direct` function and add it to the `directFunctions` map.
|
||||
|
||||
4. **Create MCP Tool (`mcp-server/src/tools/`)**:
|
||||
- Create a new file (e.g., `your-command.js`) using **kebab-case**.
|
||||
- Import `zod`, `handleApiResult`, `createErrorResponse`, **`getProjectRootFromSession`**, and your `yourCommandDirect` function.
|
||||
- Implement `registerYourCommandTool(server)`.
|
||||
- Define the tool `name` using **snake_case** (e.g., `your_command`).
|
||||
- Define the `parameters` using `zod`. **Crucially, define `projectRoot` as optional**: `projectRoot: z.string().optional().describe(...)`. Include `file` if applicable.
|
||||
- Implement the standard `async execute(args, { log, reportProgress, session })` method:
|
||||
- Get `rootFolder` using `getProjectRootFromSession` (with fallback to `args.projectRoot`).
|
||||
- Call `yourCommandDirect({ ...args, projectRoot: rootFolder }, log)`.
|
||||
- Pass the result to `handleApiResult(result, log, 'Error Message')`.
|
||||
|
||||
5. **Register Tool**: Import and call `registerYourCommandTool` in `mcp-server/src/tools/index.js`.
|
||||
|
||||
6. **Update `mcp.json`**: Add the new tool definition to the `tools` array in `.cursor/mcp.json`.
|
||||
|
||||
## Handling Responses
|
||||
|
||||
- MCP tools should return data formatted by `createContentResponse` (which stringifies objects) or `createErrorResponse`.
|
||||
- The `processMCPResponseData` utility automatically removes potentially large fields like `details` and `testStrategy` from task objects before they are returned. This is the default behavior when using `executeMCPToolAction`. If specific fields need to be preserved or different fields removed, a custom `processResult` function can be passed to `executeMCPToolAction`.
|
||||
- The `handleApiResult` utility (used by `executeMCPToolAction`) now expects the result object from the direct function wrapper to include a `fromCache` boolean flag. This flag is included in the final JSON response sent to the MCP client, nested alongside the actual data (e.g., `{ "fromCache": true, "data": { ... } }`).
|
||||
- MCP tools should return the object generated by `handleApiResult`.
|
||||
- `handleApiResult` uses `createContentResponse` or `createErrorResponse` internally.
|
||||
- `handleApiResult` also uses `processMCPResponseData` by default to filter potentially large fields (`details`, `testStrategy`) from task data. Provide a custom processor function to `handleApiResult` if different filtering is needed.
|
||||
- The final JSON response sent to the MCP client will include the `fromCache` boolean flag (obtained from the `*Direct` function's result) alongside the actual data (e.g., `{ "fromCache": true, "data": { ... } }` or `{ "fromCache": false, "data": { ... } }`).
|
||||
|
||||
## Parameter Type Handling
|
||||
|
||||
- **Prefer Direct Function Calls**: For optimal performance and error handling, MCP tools should utilize direct function wrappers defined in [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js). These wrappers call the underlying logic from the core modules (e.g., [`task-manager.js`](mdc:scripts/modules/task-manager.js)).
|
||||
- **Standard Tool Execution Pattern**:
|
||||
- The `execute` method within each MCP tool (in `mcp-server/src/tools/*.js`) should:
|
||||
1. Call the corresponding `*Direct` function wrapper (e.g., `listTasksDirect`) from [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js), passing necessary arguments and the logger.
|
||||
2. Receive the result object (typically `{ success, data/error, fromCache }`).
|
||||
3. Pass this result object to the `handleApiResult` utility (from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js)) for standardized response formatting and error handling.
|
||||
4. Return the formatted response object provided by `handleApiResult`.
|
||||
- **CLI Execution as Fallback**: The `executeTaskMasterCommand` utility in [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js) allows executing commands via the CLI (`task-master ...`). This should **only** be used as a fallback if a direct function wrapper is not yet implemented or if a specific command intrinsically requires CLI execution.
|
||||
- **Centralized Utilities** (See also: [`utilities.mdc`](mdc:.cursor/rules/utilities.mdc)):
|
||||
- Use `findTasksJsonPath` (in [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js)) *within direct function wrappers* to locate the `tasks.json` file consistently.
|
||||
- **Leverage MCP Utilities**: The file [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js) contains essential helpers for MCP tool implementation:
|
||||
- `getProjectRoot`: Normalizes project paths.
|
||||
- `handleApiResult`: Takes the raw result from a `*Direct` function and formats it into a standard MCP success or error response, automatically handling data processing via `processMCPResponseData`. This is called by the tool's `execute` method.
|
||||
- `createContentResponse`/`createErrorResponse`: Used by `handleApiResult` to format successful/error MCP responses.
|
||||
- `processMCPResponseData`: Filters/cleans data (e.g., removing `details`, `testStrategy`) before it's sent in the MCP response. Called by `handleApiResult`.
|
||||
- `getCachedOrExecute`: **Used inside `*Direct` functions** in `task-master-core.js` to implement caching logic.
|
||||
- `executeTaskMasterCommand`: Fallback for executing CLI commands.
|
||||
- **Caching**: To improve performance for frequently called read operations (like `listTasks`, `showTask`, `nextTask`), a caching layer using `lru-cache` is implemented.
|
||||
- **Caching logic resides *within* the direct function wrappers** in [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js) using the `getCachedOrExecute` utility from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js).
|
||||
- Generate unique cache keys based on function arguments that define a distinct call (e.g., file path, filters).
|
||||
- The `getCachedOrExecute` utility handles checking the cache, executing the core logic function on a cache miss, storing the result, and returning the data along with a `fromCache` flag.
|
||||
- Cache statistics can be monitored using the `cacheStats` MCP tool (implemented via `getCacheStatsDirect`).
|
||||
- **Caching should generally be applied to read-only operations** that don't modify the `tasks.json` state. Commands like `set-status`, `add-task`, `update-task`, `parse-prd`, `add-dependency` should *not* be cached as they change the underlying data.
|
||||
|
||||
**MCP Tool Implementation Checklist**:
|
||||
|
||||
1. **Core Logic Verification**:
|
||||
- [ ] Confirm the core function is properly exported from its module (e.g., `task-manager.js`)
|
||||
- [ ] Identify all required parameters and their types
|
||||
|
||||
2. **Direct Function Wrapper**:
|
||||
- [ ] Create the `*Direct` function in the appropriate file in `mcp-server/src/core/direct-functions/`
|
||||
- [ ] Import silent mode utilities and implement them around core function calls
|
||||
- [ ] Handle all parameter validations and type conversions
|
||||
- [ ] Implement path resolving for relative paths
|
||||
- [ ] Add appropriate error handling with standardized error codes
|
||||
- [ ] Add to imports/exports in `task-master-core.js`
|
||||
|
||||
3. **MCP Tool Implementation**:
|
||||
- [ ] Create new file in `mcp-server/src/tools/` with kebab-case naming
|
||||
- [ ] Define zod schema for all parameters
|
||||
- [ ] Implement the `execute` method following the standard pattern
|
||||
- [ ] Consider using AsyncOperationManager for long-running operations
|
||||
- [ ] Register tool in `mcp-server/src/tools/index.js`
|
||||
|
||||
4. **Testing**:
|
||||
- [ ] Write unit tests for the direct function wrapper
|
||||
- [ ] Write integration tests for the MCP tool
|
||||
|
||||
## Standard Error Codes
|
||||
|
||||
- **Standard Error Codes**: Use consistent error codes across direct function wrappers
|
||||
- `INPUT_VALIDATION_ERROR`: For missing or invalid required parameters
|
||||
- `FILE_NOT_FOUND_ERROR`: For file system path issues
|
||||
- `CORE_FUNCTION_ERROR`: For errors thrown by the core function
|
||||
- `UNEXPECTED_ERROR`: For all other unexpected errors
|
||||
|
||||
- **Error Object Structure**:
|
||||
```javascript
|
||||
{
|
||||
success: false,
|
||||
error: {
|
||||
code: 'ERROR_CODE',
|
||||
message: 'Human-readable error message'
|
||||
},
|
||||
fromCache: false
|
||||
}
|
||||
```
|
||||
|
||||
- **MCP Tool Logging Pattern**:
|
||||
- ✅ DO: Log the start of execution with arguments (sanitized if sensitive)
|
||||
- ✅ DO: Log successful completion with result summary
|
||||
- ✅ DO: Log all error conditions with appropriate log levels
|
||||
- ✅ DO: Include the cache status in result logs
|
||||
- ❌ DON'T: Log entire large data structures or sensitive information
|
||||
|
||||
- The MCP server integrates with Task Master core functions through three layers:
|
||||
1. Tool Definitions (`mcp-server/src/tools/*.js`) - Define parameters and validation
|
||||
2. Direct Functions (`mcp-server/src/core/direct-functions/*.js`) - Handle core logic integration
|
||||
3. Core Functions (`scripts/modules/*.js`) - Implement the actual functionality
|
||||
|
||||
- This layered approach provides:
|
||||
- Clear separation of concerns
|
||||
- Consistent parameter validation
|
||||
- Centralized error handling
|
||||
- Performance optimization through caching (for read operations)
|
||||
- Standardized response formatting
|
||||
|
||||
## MCP Naming Conventions
|
||||
|
||||
- **Files and Directories**:
|
||||
- ✅ DO: Use **kebab-case** for all file names: `list-tasks.js`, `set-task-status.js`
|
||||
- ✅ DO: Use consistent directory structure: `mcp-server/src/tools/` for tool definitions, `mcp-server/src/core/direct-functions/` for direct function implementations
|
||||
|
||||
- **JavaScript Functions**:
|
||||
- ✅ DO: Use **camelCase** with `Direct` suffix for direct function implementations: `listTasksDirect`, `setTaskStatusDirect`
|
||||
- ✅ DO: Use **camelCase** with `Tool` suffix for tool registration functions: `registerListTasksTool`, `registerSetTaskStatusTool`
|
||||
- ✅ DO: Use consistent action function naming inside direct functions: `coreActionFn` or similar descriptive name
|
||||
|
||||
- **MCP Tool Names**:
|
||||
- ✅ DO: Use **snake_case** for tool names exposed to MCP clients: `list_tasks`, `set_task_status`, `parse_prd_document`
|
||||
- ✅ DO: Include the core action in the tool name without redundant words: Use `list_tasks` instead of `list_all_tasks`
|
||||
|
||||
- **Examples**:
|
||||
- File: `list-tasks.js`
|
||||
- Direct Function: `listTasksDirect`
|
||||
- Tool Registration: `registerListTasksTool`
|
||||
- MCP Tool Name: `list_tasks`
|
||||
|
||||
- **Mapping**:
|
||||
- The `directFunctions` map in `task-master-core.js` maps the core function name (in camelCase) to its direct implementation:
|
||||
```javascript
|
||||
export const directFunctions = {
|
||||
list: listTasksDirect,
|
||||
setStatus: setTaskStatusDirect,
|
||||
// Add more functions as implemented
|
||||
};
|
||||
```
|
||||
|
||||
@@ -31,6 +31,101 @@ The standard pattern for adding a feature follows this workflow:
|
||||
5. **Configuration**: Update any configuration in [`utils.js`](mdc:scripts/modules/utils.js) if needed, following [`utilities.mdc`](mdc:.cursor/rules/utilities.mdc).
|
||||
6. **Documentation**: Update help text and documentation in [dev_workflow.mdc](mdc:scripts/modules/dev_workflow.mdc)
|
||||
|
||||
## Critical Checklist for New Features
|
||||
|
||||
- **Comprehensive Function Exports**:
|
||||
- ✅ **DO**: Export all helper functions and utility methods needed by your new function
|
||||
- ✅ **DO**: Review dependencies and ensure functions like `findTaskById`, `taskExists` are exported
|
||||
- ❌ **DON'T**: Assume internal functions are already exported - always check and add them explicitly
|
||||
- **Example**: If implementing a feature that checks task existence, ensure the helper function is in exports:
|
||||
```javascript
|
||||
// At the bottom of your module file:
|
||||
export {
|
||||
// ... existing exports ...
|
||||
yourNewFunction,
|
||||
taskExists, // Helper function used by yourNewFunction
|
||||
findTaskById, // Helper function used by yourNewFunction
|
||||
};
|
||||
```
|
||||
|
||||
- **Parameter Completeness**:
|
||||
- ✅ **DO**: Pass all required parameters to functions you call within your implementation
|
||||
- ✅ **DO**: Check function signatures before implementing calls to them
|
||||
- ❌ **DON'T**: Assume default parameter values will handle missing arguments
|
||||
- **Example**: When calling file generation, pass both required parameters:
|
||||
```javascript
|
||||
// ✅ DO: Pass all required parameters
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
|
||||
// ❌ DON'T: Omit required parameters
|
||||
await generateTaskFiles(tasksPath); // Error - missing outputDir parameter
|
||||
```
|
||||
|
||||
- **Consistent File Path Handling**:
|
||||
- ✅ **DO**: Use consistent file naming conventions: `task_${id.toString().padStart(3, '0')}.txt`
|
||||
- ✅ **DO**: Use `path.join()` for composing file paths
|
||||
- ✅ **DO**: Use appropriate file extensions (.txt for tasks, .json for data)
|
||||
- ❌ **DON'T**: Hardcode path separators or inconsistent file extensions
|
||||
- **Example**: Creating file paths for tasks:
|
||||
```javascript
|
||||
// ✅ DO: Use consistent file naming and path.join
|
||||
const taskFileName = path.join(
|
||||
path.dirname(tasksPath),
|
||||
`task_${taskId.toString().padStart(3, '0')}.txt`
|
||||
);
|
||||
|
||||
// ❌ DON'T: Use inconsistent naming or string concatenation
|
||||
const taskFileName = path.dirname(tasksPath) + '/' + taskId + '.md';
|
||||
```
|
||||
|
||||
- **Error Handling and Reporting**:
|
||||
- ✅ **DO**: Use structured error objects with code and message properties
|
||||
- ✅ **DO**: Include clear error messages identifying the specific problem
|
||||
- ✅ **DO**: Handle both function-specific errors and potential file system errors
|
||||
- ✅ **DO**: Log errors at appropriate severity levels
|
||||
- **Example**: Structured error handling in core functions:
|
||||
```javascript
|
||||
try {
|
||||
// Implementation...
|
||||
} catch (error) {
|
||||
log('error', `Error removing task: ${error.message}`);
|
||||
throw {
|
||||
code: 'REMOVE_TASK_ERROR',
|
||||
message: error.message,
|
||||
details: error.stack
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
- **Silent Mode Implementation**:
|
||||
- ✅ **DO**: Import silent mode utilities in direct functions: `import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';`
|
||||
- ✅ **DO**: Wrap core function calls with silent mode:
|
||||
```javascript
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Call the core function
|
||||
const result = await coreFunction(...);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
```
|
||||
- ✅ **DO**: Ensure silent mode is disabled in error handling:
|
||||
```javascript
|
||||
try {
|
||||
enableSilentMode();
|
||||
// Core function call
|
||||
disableSilentMode();
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
throw error; // Rethrow to be caught by outer catch block
|
||||
}
|
||||
```
|
||||
- ✅ **DO**: Add silent mode handling in all direct functions that call core functions
|
||||
- ❌ **DON'T**: Forget to disable silent mode, which would suppress all future logs
|
||||
- ❌ **DON'T**: Enable silent mode outside of direct functions in the MCP server
|
||||
|
||||
```javascript
|
||||
// 1. CORE LOGIC: Add function to appropriate module (example in task-manager.js)
|
||||
/**
|
||||
@@ -312,48 +407,122 @@ For more information on module structure, see [`MODULE_PLAN.md`](mdc:scripts/mod
|
||||
|
||||
## Adding MCP Server Support for Commands
|
||||
|
||||
Integrating Task Master commands with the MCP server (for use by tools like Cursor) follows a specific pattern distinct from the CLI command implementation.
|
||||
Integrating Task Master commands with the MCP server (for use by tools like Cursor) follows a specific pattern distinct from the CLI command implementation, prioritizing performance and reliability.
|
||||
|
||||
- **Goal**: Leverage direct function calls for performance and reliability, avoiding CLI overhead.
|
||||
- **Goal**: Leverage direct function calls to core logic, avoiding CLI overhead.
|
||||
- **Reference**: See [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) for full details.
|
||||
|
||||
**MCP Integration Workflow**:
|
||||
|
||||
1. **Core Logic**: Ensure the command's core logic exists in the appropriate module (e.g., [`task-manager.js`](mdc:scripts/modules/task-manager.js)).
|
||||
2. **Direct Function Wrapper**:
|
||||
- In [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js), create an `async function yourCommandDirect(args, log)`.
|
||||
- This function imports and calls the core logic.
|
||||
- It uses utilities like `findTasksJsonPath` if needed.
|
||||
- It handles argument parsing and validation specific to the direct call.
|
||||
- **Implement Caching (if applicable)**: For read operations that benefit from caching, use the `getCachedOrExecute` utility here to wrap the core logic call. Generate a unique cache key based on relevant arguments.
|
||||
- It returns a standard `{ success: true/false, data/error, fromCache: boolean }` object.
|
||||
- Export the function and add it to the `directFunctions` map.
|
||||
3. **MCP Tool File**:
|
||||
- Create a new file in `mcp-server/src/tools/` (e.g., `yourCommand.js`).
|
||||
- Import `zod`, `executeMCPToolAction` from `./utils.js`, and your `yourCommandDirect` function.
|
||||
- Implement `registerYourCommandTool(server)` which calls `server.addTool`:
|
||||
- Define the tool `name`, `description`, and `parameters` using `zod`. Include optional `projectRoot` and `file` if relevant, following patterns in existing tools.
|
||||
- Define the `async execute(args, log)` method for the tool.
|
||||
- **Crucially**, the `execute` method should primarily call `executeMCPToolAction`:
|
||||
```javascript
|
||||
// In mcp-server/src/tools/yourCommand.js
|
||||
import { executeMCPToolAction } from "./utils.js";
|
||||
import { yourCommandDirect } from "../core/task-master-core.js";
|
||||
import { z } from "zod";
|
||||
1. **Core Logic**: Ensure the command's core logic exists and is exported from the appropriate module (e.g., [`task-manager.js`](mdc:scripts/modules/task-manager.js)).
|
||||
2. **Direct Function Wrapper (`mcp-server/src/core/direct-functions/`)**:
|
||||
- Create a new file (e.g., `your-command.js`) in `mcp-server/src/core/direct-functions/` using **kebab-case** naming.
|
||||
- Import the core logic function, necessary MCP utilities like **`findTasksJsonPath` from `../utils/path-utils.js`**, and **silent mode utilities**: `import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';`
|
||||
- Implement an `async function yourCommandDirect(args, log)` using **camelCase** with `Direct` suffix.
|
||||
- **Path Finding**: Inside this function, obtain the `tasksPath` by calling `const tasksPath = findTasksJsonPath(args, log);`. This relies on `args.projectRoot` (derived from the session) being passed correctly.
|
||||
- Perform validation on other arguments received in `args`.
|
||||
- **Implement Silent Mode**: Wrap core function calls with `enableSilentMode()` and `disableSilentMode()` to prevent logs from interfering with JSON responses.
|
||||
- **If Caching**: Implement caching using `getCachedOrExecute` from `../../tools/utils.js`.
|
||||
- **If Not Caching**: Directly call the core logic function within a try/catch block.
|
||||
- Format the return as `{ success: true/false, data/error, fromCache: boolean }`.
|
||||
- Export the wrapper function.
|
||||
|
||||
export function registerYourCommandTool(server) {
|
||||
server.addTool({
|
||||
name: "yourCommand",
|
||||
description: "Description of your command.",
|
||||
parameters: z.object({ /* zod schema */ }),
|
||||
async execute(args, log) {
|
||||
return executeMCPToolAction({
|
||||
actionFn: yourCommandDirect, // Pass the direct function wrapper
|
||||
args, log, actionName: "Your Command Description"
|
||||
3. **Update `task-master-core.js` with Import/Export**: Import and re-export your `*Direct` function and add it to the `directFunctions` map.
|
||||
|
||||
4. **Create MCP Tool (`mcp-server/src/tools/`)**:
|
||||
- Create a new file (e.g., `your-command.js`) using **kebab-case**.
|
||||
- Import `zod`, `handleApiResult`, `createErrorResponse`, **`getProjectRootFromSession`**, and your `yourCommandDirect` function.
|
||||
- Implement `registerYourCommandTool(server)`.
|
||||
- Define the tool `name` using **snake_case** (e.g., `your_command`).
|
||||
- Define the `parameters` using `zod`. **Crucially, define `projectRoot` as optional**: `projectRoot: z.string().optional().describe(...)`. Include `file` if applicable.
|
||||
- Implement the standard `async execute(args, { log, reportProgress, session })` method:
|
||||
- Get `rootFolder` using `getProjectRootFromSession` (with fallback to `args.projectRoot`).
|
||||
- Call `yourCommandDirect({ ...args, projectRoot: rootFolder }, log)`.
|
||||
- Pass the result to `handleApiResult(result, log, 'Error Message')`.
|
||||
|
||||
5. **Register Tool**: Import and call `registerYourCommandTool` in `mcp-server/src/tools/index.js`.
|
||||
|
||||
6. **Update `mcp.json`**: Add the new tool definition to the `tools` array in `.cursor/mcp.json`.
|
||||
|
||||
## Implementing Background Operations
|
||||
|
||||
For long-running operations that should not block the client, use the AsyncOperationManager:
|
||||
|
||||
1. **Identify Background-Appropriate Operations**:
|
||||
- ✅ **DO**: Use async operations for CPU-intensive tasks like task expansion or PRD parsing
|
||||
- ✅ **DO**: Consider async operations for tasks that may take more than 1-2 seconds
|
||||
- ❌ **DON'T**: Use async operations for quick read/status operations
|
||||
- ❌ **DON'T**: Use async operations when immediate feedback is critical
|
||||
|
||||
2. **Use AsyncOperationManager in MCP Tools**:
|
||||
```javascript
|
||||
import { asyncOperationManager } from '../core/utils/async-manager.js';
|
||||
|
||||
// In execute method:
|
||||
const operationId = asyncOperationManager.addOperation(
|
||||
expandTaskDirect, // The direct function to run in background
|
||||
{ ...args, projectRoot: rootFolder }, // Args to pass to the function
|
||||
{ log, reportProgress, session } // Context to preserve for the operation
|
||||
);
|
||||
|
||||
// Return immediate response with operation ID
|
||||
return createContentResponse({
|
||||
message: "Operation started successfully",
|
||||
operationId,
|
||||
status: "pending"
|
||||
});
|
||||
```
|
||||
|
||||
3. **Implement Progress Reporting**:
|
||||
- ✅ **DO**: Use the reportProgress function in direct functions:
|
||||
```javascript
|
||||
// In your direct function:
|
||||
if (reportProgress) {
|
||||
await reportProgress({ progress: 50 }); // 50% complete
|
||||
}
|
||||
```
|
||||
- AsyncOperationManager will forward progress updates to the client
|
||||
|
||||
4. **Check Operation Status**:
|
||||
- Implement a way for clients to check status using the `get_operation_status` MCP tool
|
||||
- Return appropriate status codes and messages
|
||||
|
||||
## Project Initialization
|
||||
|
||||
When implementing project initialization commands:
|
||||
|
||||
1. **Support Programmatic Initialization**:
|
||||
- ✅ **DO**: Design initialization to work with both CLI and MCP
|
||||
- ✅ **DO**: Support non-interactive modes with sensible defaults
|
||||
- ✅ **DO**: Handle project metadata like name, description, version
|
||||
- ✅ **DO**: Create necessary files and directories
|
||||
|
||||
2. **In MCP Tool Implementation**:
|
||||
```javascript
|
||||
// In initialize-project.js MCP tool:
|
||||
import { z } from "zod";
|
||||
import { initializeProjectDirect } from "../core/task-master-core.js";
|
||||
|
||||
export function registerInitializeProjectTool(server) {
|
||||
server.addTool({
|
||||
name: "initialize_project",
|
||||
description: "Initialize a new Task Master project",
|
||||
parameters: z.object({
|
||||
projectName: z.string().optional().describe("The name for the new project"),
|
||||
projectDescription: z.string().optional().describe("A brief description"),
|
||||
projectVersion: z.string().optional().describe("Initial version (e.g., '0.1.0')"),
|
||||
// Add other parameters as needed
|
||||
}),
|
||||
execute: async (args, { log, reportProgress, session }) => {
|
||||
try {
|
||||
// No need for project root since we're creating a new project
|
||||
const result = await initializeProjectDirect(args, log);
|
||||
return handleApiResult(result, log, 'Error initializing project');
|
||||
} catch (error) {
|
||||
log.error(`Error in initialize_project: ${error.message}`);
|
||||
return createErrorResponse(`Failed to initialize project: ${error.message}`);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
```
|
||||
4. **Register in Tool Index**: Import and call `registerYourCommandTool` in [`mcp-server/src/tools/index.js`](mdc:mcp-server/src/tools/index.js).
|
||||
5. **Update `mcp.json`**: Add the tool definition to `.cursor/mcp.json`.
|
||||
|
||||
322
.cursor/rules/taskmaster.mdc
Normal file
322
.cursor/rules/taskmaster.mdc
Normal file
@@ -0,0 +1,322 @@
|
||||
---
|
||||
description: Comprehensive reference for Taskmaster MCP tools and CLI commands.
|
||||
globs: **/*
|
||||
alwaysApply: true
|
||||
---
|
||||
|
||||
# Taskmaster Tool & Command Reference
|
||||
|
||||
This document provides a detailed reference for interacting with Taskmaster, covering both the recommended MCP tools (for integrations like Cursor) and the corresponding `task-master` CLI commands (for direct user interaction or fallback).
|
||||
|
||||
**Note:** For interacting with Taskmaster programmatically or via integrated tools, using the **MCP tools is strongly recommended** due to better performance, structured data, and error handling. The CLI commands serve as a user-friendly alternative and fallback. See [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) for MCP implementation details and [`commands.mdc`](mdc:.cursor/rules/commands.mdc) for CLI implementation guidelines.
|
||||
|
||||
---
|
||||
|
||||
## Initialization & Setup
|
||||
|
||||
### 1. Initialize Project (`init`)
|
||||
|
||||
* **MCP Tool:** `initialize_project`
|
||||
* **CLI Command:** `task-master init [options]`
|
||||
* **Description:** `Set up the basic Taskmaster file structure and configuration in the current directory for a new project.`
|
||||
* **Key CLI Options:**
|
||||
* `--name <name>`: `Set the name for your project in Taskmaster's configuration.`
|
||||
* `--description <text>`: `Provide a brief description for your project.`
|
||||
* `--version <version>`: `Set the initial version for your project (e.g., '0.1.0').`
|
||||
* `-y, --yes`: `Initialize Taskmaster quickly using default settings without interactive prompts.`
|
||||
* **Usage:** Run this once at the beginning of a new project.
|
||||
* **MCP Variant Description:** `Set up the basic Taskmaster file structure and configuration in the current directory for a new project by running the 'task-master init' command.`
|
||||
* **Key MCP Parameters/Options:**
|
||||
* `projectName`: `Set the name for your project.` (CLI: `--name <name>`)
|
||||
* `projectDescription`: `Provide a brief description for your project.` (CLI: `--description <text>`)
|
||||
* `projectVersion`: `Set the initial version for your project (e.g., '0.1.0').` (CLI: `--version <version>`)
|
||||
* `authorName`: `Author name.` (CLI: `--author <author>`)
|
||||
* `skipInstall`: `Skip installing dependencies (default: false).` (CLI: `--skip-install`)
|
||||
* `addAliases`: `Add shell aliases (tm, taskmaster) (default: false).` (CLI: `--aliases`)
|
||||
* `yes`: `Skip prompts and use defaults/provided arguments (default: false).` (CLI: `-y, --yes`)
|
||||
* **Usage:** Run this once at the beginning of a new project, typically via an integrated tool like Cursor. Operates on the current working directory of the MCP server.
|
||||
|
||||
|
||||
### 2. Parse PRD (`parse_prd`)
|
||||
|
||||
* **MCP Tool:** `parse_prd`
|
||||
* **CLI Command:** `task-master parse-prd [file] [options]`
|
||||
* **Description:** `Parse a Product Requirements Document (PRD) or text file with Taskmaster to automatically generate an initial set of tasks in tasks.json.`
|
||||
* **Key Parameters/Options:**
|
||||
* `input`: `Path to your PRD or requirements text file that Taskmaster should parse for tasks.` (CLI: `[file]` positional or `-i, --input <file>`)
|
||||
* `output`: `Specify where Taskmaster should save the generated 'tasks.json' file (default: 'tasks/tasks.json').` (CLI: `-o, --output <file>`)
|
||||
* `numTasks`: `Approximate number of top-level tasks Taskmaster should aim to generate from the document.` (CLI: `-n, --num-tasks <number>`)
|
||||
* `force`: `Use this to allow Taskmaster to overwrite an existing 'tasks.json' without asking for confirmation.` (CLI: `-f, --force`)
|
||||
* **Usage:** Useful for bootstrapping a project from an existing requirements document.
|
||||
* **Notes:** Task Master will strictly adhere to any specific requirements mentioned in the PRD (libraries, database schemas, frameworks, tech stacks, etc.) while filling in any gaps where the PRD isn't fully specified. Tasks are designed to provide the most direct implementation path while avoiding over-engineering.
|
||||
|
||||
---
|
||||
|
||||
## Task Listing & Viewing
|
||||
|
||||
### 3. Get Tasks (`get_tasks`)
|
||||
|
||||
* **MCP Tool:** `get_tasks`
|
||||
* **CLI Command:** `task-master list [options]`
|
||||
* **Description:** `List your Taskmaster tasks, optionally filtering by status and showing subtasks.`
|
||||
* **Key Parameters/Options:**
|
||||
* `status`: `Show only Taskmaster tasks matching this status (e.g., 'pending', 'done').` (CLI: `-s, --status <status>`)
|
||||
* `withSubtasks`: `Include subtasks indented under their parent tasks in the list.` (CLI: `--with-subtasks`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Get an overview of the project status, often used at the start of a work session.
|
||||
|
||||
### 4. Get Next Task (`next_task`)
|
||||
|
||||
* **MCP Tool:** `next_task`
|
||||
* **CLI Command:** `task-master next [options]`
|
||||
* **Description:** `Ask Taskmaster to show the next available task you can work on, based on status and completed dependencies.`
|
||||
* **Key Parameters/Options:**
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Identify what to work on next according to the plan.
|
||||
|
||||
### 5. Get Task Details (`get_task`)
|
||||
|
||||
* **MCP Tool:** `get_task`
|
||||
* **CLI Command:** `task-master show [id] [options]`
|
||||
* **Description:** `Display detailed information for a specific Taskmaster task or subtask by its ID.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID of the Taskmaster task (e.g., '15') or subtask (e.g., '15.2') you want to view.` (CLI: `[id]` positional or `-i, --id <id>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Understand the full details, implementation notes, and test strategy for a specific task before starting work.
|
||||
|
||||
---
|
||||
|
||||
## Task Creation & Modification
|
||||
|
||||
### 6. Add Task (`add_task`)
|
||||
|
||||
* **MCP Tool:** `add_task`
|
||||
* **CLI Command:** `task-master add-task [options]`
|
||||
* **Description:** `Add a new task to Taskmaster by describing it; AI will structure it.`
|
||||
* **Key Parameters/Options:**
|
||||
* `prompt`: `Required. Describe the new task you want Taskmaster to create (e.g., "Implement user authentication using JWT").` (CLI: `-p, --prompt <text>`)
|
||||
* `dependencies`: `Specify the IDs of any Taskmaster tasks that must be completed before this new one can start (e.g., '12,14').` (CLI: `-d, --dependencies <ids>`)
|
||||
* `priority`: `Set the priority for the new task ('high', 'medium', 'low'; default: 'medium').` (CLI: `--priority <priority>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Quickly add newly identified tasks during development.
|
||||
|
||||
### 7. Add Subtask (`add_subtask`)
|
||||
|
||||
* **MCP Tool:** `add_subtask`
|
||||
* **CLI Command:** `task-master add-subtask [options]`
|
||||
* **Description:** `Add a new subtask to a Taskmaster parent task, or convert an existing task into a subtask.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id` / `parent`: `Required. The ID of the Taskmaster task that will be the parent.` (MCP: `id`, CLI: `-p, --parent <id>`)
|
||||
* `taskId`: `Use this if you want to convert an existing top-level Taskmaster task into a subtask of the specified parent.` (CLI: `-i, --task-id <id>`)
|
||||
* `title`: `Required (if not using taskId). The title for the new subtask Taskmaster should create.` (CLI: `-t, --title <title>`)
|
||||
* `description`: `A brief description for the new subtask.` (CLI: `-d, --description <text>`)
|
||||
* `details`: `Provide implementation notes or details for the new subtask.` (CLI: `--details <text>`)
|
||||
* `dependencies`: `Specify IDs of other tasks or subtasks (e.g., '15', '16.1') that must be done before this new subtask.` (CLI: `--dependencies <ids>`)
|
||||
* `status`: `Set the initial status for the new subtask (default: 'pending').` (CLI: `-s, --status <status>`)
|
||||
* `skipGenerate`: `Prevent Taskmaster from automatically regenerating markdown task files after adding the subtask.` (CLI: `--skip-generate`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Break down tasks manually or reorganize existing tasks.
|
||||
|
||||
### 8. Update Tasks (`update`)
|
||||
|
||||
* **MCP Tool:** `update`
|
||||
* **CLI Command:** `task-master update [options]`
|
||||
* **Description:** `Update multiple upcoming tasks in Taskmaster based on new context or changes, starting from a specific task ID.`
|
||||
* **Key Parameters/Options:**
|
||||
* `from`: `Required. The ID of the first task Taskmaster should update. All tasks with this ID or higher (and not 'done') will be considered.` (CLI: `--from <id>`)
|
||||
* `prompt`: `Required. Explain the change or new context for Taskmaster to apply to the tasks (e.g., "We are now using React Query instead of Redux Toolkit for data fetching").` (CLI: `-p, --prompt <text>`)
|
||||
* `research`: `Enable Taskmaster to use Perplexity AI for more informed updates based on external knowledge (requires PERPLEXITY_API_KEY).` (CLI: `-r, --research`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Handle significant implementation changes or pivots that affect multiple future tasks.
|
||||
|
||||
### 9. Update Task (`update_task`)
|
||||
|
||||
* **MCP Tool:** `update_task`
|
||||
* **CLI Command:** `task-master update-task [options]`
|
||||
* **Description:** `Modify a specific Taskmaster task (or subtask) by its ID, incorporating new information or changes.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The specific ID of the Taskmaster task (e.g., '15') or subtask (e.g., '15.2') you want to update.` (CLI: `-i, --id <id>`)
|
||||
* `prompt`: `Required. Explain the specific changes or provide the new information Taskmaster should incorporate into this task.` (CLI: `-p, --prompt <text>`)
|
||||
* `research`: `Enable Taskmaster to use Perplexity AI for more informed updates (requires PERPLEXITY_API_KEY).` (CLI: `-r, --research`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Refine a specific task based on new understanding or feedback.
|
||||
|
||||
### 10. Update Subtask (`update_subtask`)
|
||||
|
||||
* **MCP Tool:** `update_subtask`
|
||||
* **CLI Command:** `task-master update-subtask [options]`
|
||||
* **Description:** `Append timestamped notes or details to a specific Taskmaster subtask without overwriting existing content.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The specific ID of the Taskmaster subtask (e.g., '15.2') you want to add information to.` (CLI: `-i, --id <id>`)
|
||||
* `prompt`: `Required. Provide the information or notes Taskmaster should append to the subtask's details.` (CLI: `-p, --prompt <text>`)
|
||||
* `research`: `Enable Taskmaster to use Perplexity AI for more informed updates (requires PERPLEXITY_API_KEY).` (CLI: `-r, --research`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Add implementation notes, code snippets, or clarifications to a subtask during development.
|
||||
|
||||
### 11. Set Task Status (`set_task_status`)
|
||||
|
||||
* **MCP Tool:** `set_task_status`
|
||||
* **CLI Command:** `task-master set-status [options]`
|
||||
* **Description:** `Update the status of one or more Taskmaster tasks or subtasks (e.g., 'pending', 'in-progress', 'done').`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID(s) of the Taskmaster task(s) or subtask(s) (e.g., '15', '15.2', '16,17.1') to update.` (CLI: `-i, --id <id>`)
|
||||
* `status`: `Required. The new status to set (e.g., 'done', 'pending', 'in-progress', 'review', 'cancelled').` (CLI: `-s, --status <status>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Mark progress as tasks move through the development cycle.
|
||||
|
||||
### 12. Remove Task (`remove_task`)
|
||||
|
||||
* **MCP Tool:** `remove_task`
|
||||
* **CLI Command:** `task-master remove-task [options]`
|
||||
* **Description:** `Permanently remove a task or subtask from the Taskmaster tasks list.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID of the Taskmaster task (e.g., '5') or subtask (e.g., '5.2') to permanently remove.` (CLI: `-i, --id <id>`)
|
||||
* `yes`: `Skip the confirmation prompt and immediately delete the task.` (CLI: `-y, --yes`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Permanently delete tasks or subtasks that are no longer needed in the project.
|
||||
* **Notes:** Use with caution as this operation cannot be undone. Consider using 'blocked', 'cancelled', or 'deferred' status instead if you just want to exclude a task from active planning but keep it for reference. The command automatically cleans up dependency references in other tasks.
|
||||
|
||||
---
|
||||
|
||||
## Task Structure & Breakdown
|
||||
|
||||
### 13. Expand Task (`expand_task`)
|
||||
|
||||
* **MCP Tool:** `expand_task`
|
||||
* **CLI Command:** `task-master expand [options]`
|
||||
* **Description:** `Use Taskmaster's AI to break down a complex task (or all tasks) into smaller, manageable subtasks.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `The ID of the specific Taskmaster task you want to break down into subtasks.` (CLI: `-i, --id <id>`)
|
||||
* `num`: `Suggests how many subtasks Taskmaster should aim to create (uses complexity analysis by default).` (CLI: `-n, --num <number>`)
|
||||
* `research`: `Enable Taskmaster to use Perplexity AI for more informed subtask generation (requires PERPLEXITY_API_KEY).` (CLI: `-r, --research`)
|
||||
* `prompt`: `Provide extra context or specific instructions to Taskmaster for generating the subtasks.` (CLI: `-p, --prompt <text>`)
|
||||
* `force`: `Use this to make Taskmaster replace existing subtasks with newly generated ones.` (CLI: `--force`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Generate a detailed implementation plan for a complex task before starting coding.
|
||||
|
||||
### 14. Expand All Tasks (`expand_all`)
|
||||
|
||||
* **MCP Tool:** `expand_all`
|
||||
* **CLI Command:** `task-master expand --all [options]` (Note: CLI uses the `expand` command with the `--all` flag)
|
||||
* **Description:** `Tell Taskmaster to automatically expand all 'pending' tasks based on complexity analysis.`
|
||||
* **Key Parameters/Options:**
|
||||
* `num`: `Suggests how many subtasks Taskmaster should aim to create per task.` (CLI: `-n, --num <number>`)
|
||||
* `research`: `Enable Perplexity AI for more informed subtask generation (requires PERPLEXITY_API_KEY).` (CLI: `-r, --research`)
|
||||
* `prompt`: `Provide extra context for Taskmaster to apply generally during expansion.` (CLI: `-p, --prompt <text>`)
|
||||
* `force`: `Make Taskmaster replace existing subtasks.` (CLI: `--force`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Useful after initial task generation or complexity analysis to break down multiple tasks at once.
|
||||
|
||||
### 15. Clear Subtasks (`clear_subtasks`)
|
||||
|
||||
* **MCP Tool:** `clear_subtasks`
|
||||
* **CLI Command:** `task-master clear-subtasks [options]`
|
||||
* **Description:** `Remove all subtasks from one or more specified Taskmaster parent tasks.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `The ID(s) of the Taskmaster parent task(s) whose subtasks you want to remove (e.g., '15', '16,18').` (Required unless using `all`) (CLI: `-i, --id <ids>`)
|
||||
* `all`: `Tell Taskmaster to remove subtasks from all parent tasks.` (CLI: `--all`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Used before regenerating subtasks with `expand_task` if the previous breakdown needs replacement.
|
||||
|
||||
### 16. Remove Subtask (`remove_subtask`)
|
||||
|
||||
* **MCP Tool:** `remove_subtask`
|
||||
* **CLI Command:** `task-master remove-subtask [options]`
|
||||
* **Description:** `Remove a subtask from its Taskmaster parent, optionally converting it into a standalone task.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID(s) of the Taskmaster subtask(s) to remove (e.g., '15.2', '16.1,16.3').` (CLI: `-i, --id <id>`)
|
||||
* `convert`: `If used, Taskmaster will turn the subtask into a regular top-level task instead of deleting it.` (CLI: `-c, --convert`)
|
||||
* `skipGenerate`: `Prevent Taskmaster from automatically regenerating markdown task files after removing the subtask.` (CLI: `--skip-generate`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Delete unnecessary subtasks or promote a subtask to a top-level task.
|
||||
|
||||
---
|
||||
|
||||
## Dependency Management
|
||||
|
||||
### 17. Add Dependency (`add_dependency`)
|
||||
|
||||
* **MCP Tool:** `add_dependency`
|
||||
* **CLI Command:** `task-master add-dependency [options]`
|
||||
* **Description:** `Define a dependency in Taskmaster, making one task a prerequisite for another.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID of the Taskmaster task that will depend on another.` (CLI: `-i, --id <id>`)
|
||||
* `dependsOn`: `Required. The ID of the Taskmaster task that must be completed first (the prerequisite).` (CLI: `-d, --depends-on <id>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Establish the correct order of execution between tasks.
|
||||
|
||||
### 18. Remove Dependency (`remove_dependency`)
|
||||
|
||||
* **MCP Tool:** `remove_dependency`
|
||||
* **CLI Command:** `task-master remove-dependency [options]`
|
||||
* **Description:** `Remove a dependency relationship between two Taskmaster tasks.`
|
||||
* **Key Parameters/Options:**
|
||||
* `id`: `Required. The ID of the Taskmaster task you want to remove a prerequisite from.` (CLI: `-i, --id <id>`)
|
||||
* `dependsOn`: `Required. The ID of the Taskmaster task that should no longer be a prerequisite.` (CLI: `-d, --depends-on <id>`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Update task relationships when the order of execution changes.
|
||||
|
||||
### 19. Validate Dependencies (`validate_dependencies`)
|
||||
|
||||
* **MCP Tool:** `validate_dependencies`
|
||||
* **CLI Command:** `task-master validate-dependencies [options]`
|
||||
* **Description:** `Check your Taskmaster tasks for dependency issues (like circular references or links to non-existent tasks) without making changes.`
|
||||
* **Key Parameters/Options:**
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Audit the integrity of your task dependencies.
|
||||
|
||||
### 20. Fix Dependencies (`fix_dependencies`)
|
||||
|
||||
* **MCP Tool:** `fix_dependencies`
|
||||
* **CLI Command:** `task-master fix-dependencies [options]`
|
||||
* **Description:** `Automatically fix dependency issues (like circular references or links to non-existent tasks) in your Taskmaster tasks.`
|
||||
* **Key Parameters/Options:**
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Clean up dependency errors automatically.
|
||||
|
||||
---
|
||||
|
||||
## Analysis & Reporting
|
||||
|
||||
### 21. Analyze Complexity (`analyze_complexity`)
|
||||
|
||||
* **MCP Tool:** `analyze_complexity`
|
||||
* **CLI Command:** `task-master analyze-complexity [options]`
|
||||
* **Description:** `Let Taskmaster analyze the complexity of your tasks and generate a report with recommendations for which ones need breaking down.`
|
||||
* **Key Parameters/Options:**
|
||||
* `output`: `Where Taskmaster should save the JSON complexity analysis report (default: 'scripts/task-complexity-report.json').` (CLI: `-o, --output <file>`)
|
||||
* `threshold`: `The minimum complexity score (1-10) for Taskmaster to recommend expanding a task.` (CLI: `-t, --threshold <number>`)
|
||||
* `research`: `Enable Taskmaster to use Perplexity AI for more informed complexity analysis (requires PERPLEXITY_API_KEY).` (CLI: `-r, --research`)
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** Identify which tasks are likely too large and need further breakdown before implementation.
|
||||
|
||||
### 22. Complexity Report (`complexity_report`)
|
||||
|
||||
* **MCP Tool:** `complexity_report`
|
||||
* **CLI Command:** `task-master complexity-report [options]`
|
||||
* **Description:** `Display the Taskmaster task complexity analysis report generated by 'analyze-complexity'.`
|
||||
* **Key Parameters/Options:**
|
||||
* `file`: `Path to the JSON complexity report file (default: 'scripts/task-complexity-report.json').` (CLI: `-f, --file <file>`)
|
||||
* **Usage:** View the formatted results of the complexity analysis to guide task expansion.
|
||||
|
||||
---
|
||||
|
||||
## File Generation
|
||||
|
||||
### 23. Generate Task Files (`generate`)
|
||||
|
||||
* **MCP Tool:** `generate`
|
||||
* **CLI Command:** `task-master generate [options]`
|
||||
* **Description:** `Generate individual markdown files for each task and subtask defined in your Taskmaster 'tasks.json'.`
|
||||
* **Key Parameters/Options:**
|
||||
* `file`: `Path to your Taskmaster 'tasks.json' file containing the task data (default relies on auto-detection).` (CLI: `-f, --file <file>`)
|
||||
* `output`: `The directory where Taskmaster should save the generated markdown task files (default: 'tasks').` (CLI: `-o, --output <dir>`)
|
||||
* **Usage:** Create/update the individual `.md` files in the `tasks/` directory, useful for tracking changes in git or viewing tasks individually.
|
||||
|
||||
---
|
||||
|
||||
## Configuration & Metadata
|
||||
|
||||
- **Environment Variables**: Taskmaster relies on environment variables for configuration (API keys, model preferences, default settings). See [`dev_workflow.mdc`](mdc:.cursor/rules/dev_workflow.mdc) or the project README for a list.
|
||||
- **`tasks.json`**: The core data file containing the array of tasks and their details. See [`tasks.mdc`](mdc:.cursor/rules/tasks.mdc) for details.
|
||||
- **`task_xxx.md` files**: Individual markdown files generated by the `generate` command/tool, reflecting the content of `tasks.json`.
|
||||
@@ -44,6 +44,12 @@ alwaysApply: false
|
||||
}
|
||||
```
|
||||
|
||||
- **Location**:
|
||||
- **Core CLI Utilities**: Place utilities used primarily by the core `task-master` CLI logic and command modules (`scripts/modules/*`) into [`scripts/modules/utils.js`](mdc:scripts/modules/utils.js).
|
||||
- **MCP Server Utilities**: Place utilities specifically designed to support the MCP server implementation into the appropriate subdirectories within `mcp-server/src/`.
|
||||
- Path/Core Logic Helpers: [`mcp-server/src/core/utils/`](mdc:mcp-server/src/core/utils/) (e.g., `path-utils.js`).
|
||||
- Tool Execution/Response Helpers: [`mcp-server/src/tools/utils.js`](mdc:mcp-server/src/tools/utils.js).
|
||||
|
||||
## Documentation Standards
|
||||
|
||||
- **JSDoc Format**:
|
||||
@@ -73,7 +79,7 @@ alwaysApply: false
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration Management
|
||||
## Configuration Management (in `scripts/modules/utils.js`)
|
||||
|
||||
- **Environment Variables**:
|
||||
- ✅ DO: Provide default values for all configuration
|
||||
@@ -84,19 +90,19 @@ alwaysApply: false
|
||||
```javascript
|
||||
// ✅ DO: Set up configuration with defaults and environment overrides
|
||||
const CONFIG = {
|
||||
model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
|
||||
model: process.env.MODEL || 'claude-3-opus-20240229', // Updated default model
|
||||
maxTokens: parseInt(process.env.MAX_TOKENS || '4000'),
|
||||
temperature: parseFloat(process.env.TEMPERATURE || '0.7'),
|
||||
debug: process.env.DEBUG === "true",
|
||||
logLevel: process.env.LOG_LEVEL || "info",
|
||||
defaultSubtasks: parseInt(process.env.DEFAULT_SUBTASKS || "3"),
|
||||
defaultPriority: process.env.DEFAULT_PRIORITY || "medium",
|
||||
projectName: process.env.PROJECT_NAME || "Task Master",
|
||||
projectVersion: "1.5.0" // Version should be hardcoded
|
||||
projectName: process.env.PROJECT_NAME || "Task Master Project", // Generic project name
|
||||
projectVersion: "1.5.0" // Version should be updated via release process
|
||||
};
|
||||
```
|
||||
|
||||
## Logging Utilities
|
||||
## Logging Utilities (in `scripts/modules/utils.js`)
|
||||
|
||||
- **Log Levels**:
|
||||
- ✅ DO: Support multiple log levels (debug, info, warn, error)
|
||||
@@ -129,18 +135,23 @@ alwaysApply: false
|
||||
}
|
||||
```
|
||||
|
||||
## File Operations
|
||||
## File Operations (in `scripts/modules/utils.js`)
|
||||
|
||||
- **Error Handling**:
|
||||
- ✅ DO: Use try/catch blocks for all file operations
|
||||
- ✅ DO: Return null or a default value on failure
|
||||
- ✅ DO: Log detailed error information
|
||||
- ❌ DON'T: Allow exceptions to propagate unhandled
|
||||
- ✅ DO: Log detailed error information using the `log` utility
|
||||
- ❌ DON'T: Allow exceptions to propagate unhandled from simple file reads/writes
|
||||
|
||||
```javascript
|
||||
// ✅ DO: Handle file operation errors properly
|
||||
// ✅ DO: Handle file operation errors properly in core utils
|
||||
function writeJSON(filepath, data) {
|
||||
try {
|
||||
// Ensure directory exists (example)
|
||||
const dir = path.dirname(filepath);
|
||||
if (!fs.existsSync(dir)) {
|
||||
fs.mkdirSync(dir, { recursive: true });
|
||||
}
|
||||
fs.writeFileSync(filepath, JSON.stringify(data, null, 2));
|
||||
} catch (error) {
|
||||
log('error', `Error writing JSON file ${filepath}:`, error.message);
|
||||
@@ -151,7 +162,7 @@ alwaysApply: false
|
||||
}
|
||||
```
|
||||
|
||||
## Task-Specific Utilities
|
||||
## Task-Specific Utilities (in `scripts/modules/utils.js`)
|
||||
|
||||
- **Task ID Formatting**:
|
||||
- ✅ DO: Create utilities for consistent ID handling
|
||||
@@ -224,7 +235,7 @@ alwaysApply: false
|
||||
}
|
||||
```
|
||||
|
||||
## Cycle Detection
|
||||
## Cycle Detection (in `scripts/modules/utils.js`)
|
||||
|
||||
- **Graph Algorithms**:
|
||||
- ✅ DO: Implement cycle detection using graph traversal
|
||||
@@ -273,84 +284,68 @@ alwaysApply: false
|
||||
}
|
||||
```
|
||||
|
||||
## MCP Server Utilities (`mcp-server/src/tools/utils.js`)
|
||||
## MCP Server Core Utilities (`mcp-server/src/core/utils/`)
|
||||
|
||||
- **Purpose**: These utilities specifically support the MCP server tools, handling communication patterns and data formatting for MCP clients. Refer to [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) for usage patterns.
|
||||
### Project Root and Task File Path Detection (`path-utils.js`)
|
||||
|
||||
-(See also: [`tests.mdc`](mdc:.cursor/rules/tests.mdc) for testing these utilities)
|
||||
- **Purpose**: This module ([`mcp-server/src/core/utils/path-utils.js`](mdc:mcp-server/src/core/utils/path-utils.js)) provides the mechanism for locating the user's `tasks.json` file, used by direct functions.
|
||||
- **`findTasksJsonPath(args, log)`**:
|
||||
- ✅ **DO**: Call this function from within **direct function wrappers** (e.g., `listTasksDirect` in `mcp-server/src/core/direct-functions/`) to get the absolute path to the relevant `tasks.json`.
|
||||
- Pass the *entire `args` object* received by the MCP tool (which should include `projectRoot` derived from the session) and the `log` object.
|
||||
- Implements a **simplified precedence system** for finding the `tasks.json` path:
|
||||
1. Explicit `projectRoot` passed in `args` (Expected from MCP tools).
|
||||
2. Cached `lastFoundProjectRoot` (CLI fallback).
|
||||
3. Search upwards from `process.cwd()` (CLI fallback).
|
||||
- Throws a specific error if the `tasks.json` file cannot be located.
|
||||
- Updates the `lastFoundProjectRoot` cache on success.
|
||||
- **`PROJECT_MARKERS`**: An exported array of common file/directory names used to identify a likely project root during the CLI fallback search.
|
||||
- **`getPackagePath()`**: Utility to find the installation path of the `task-master-ai` package itself (potentially removable).
|
||||
|
||||
- **`getProjectRoot(projectRootRaw, log)`**:
|
||||
- Normalizes a potentially relative project root path into an absolute path.
|
||||
- Defaults to `process.cwd()` if `projectRootRaw` is not provided.
|
||||
- Primarily used *internally* by `executeMCPToolAction` and `executeTaskMasterCommand`. Tools usually don't need to call this directly.
|
||||
## MCP Server Tool Utilities (`mcp-server/src/tools/utils.js`)
|
||||
|
||||
- **`executeMCPToolAction({ actionFn, args, log, actionName, processResult })`**:
|
||||
- ✅ **DO**: Use this as the main wrapper inside an MCP tool's `execute` method when calling a direct function wrapper.
|
||||
- Handles standard workflow: logs action start, normalizes `projectRoot`, calls the `actionFn` (e.g., `listTasksDirect`), processes the result (using `handleApiResult`), logs success/error, and returns a formatted MCP response (`createContentResponse`/`createErrorResponse`).
|
||||
- Simplifies tool implementation significantly by handling boilerplate.
|
||||
- Accepts an optional `processResult` function to customize data filtering/transformation before sending the response (defaults to `processMCPResponseData`).
|
||||
- **Purpose**: These utilities specifically support the MCP server tools ([`mcp-server/src/tools/*.js`](mdc:mcp-server/src/tools/*.js)), handling MCP communication patterns, response formatting, caching integration, and the CLI fallback mechanism.
|
||||
- **Refer to [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc)** for detailed usage patterns within the MCP tool `execute` methods and direct function wrappers.
|
||||
|
||||
- **`getProjectRootFromSession(session, log)`**:
|
||||
- ✅ **DO**: Call this utility **within the MCP tool's `execute` method** to extract the project root path from the `session` object.
|
||||
- Decodes the `file://` URI and handles potential errors.
|
||||
- Returns the project path string or `null`.
|
||||
- The returned path should then be passed in the `args` object when calling the corresponding `*Direct` function (e.g., `yourDirectFunction({ ...args, projectRoot: rootFolder }, log)`).
|
||||
|
||||
- **`handleApiResult(result, log, errorPrefix, processFunction)`**:
|
||||
- Takes the standard `{ success, data/error }` object returned by direct function wrappers (like `listTasksDirect`).
|
||||
- Checks the `success` flag.
|
||||
- If successful, processes the `data` using `processFunction` (defaults to `processMCPResponseData`).
|
||||
- Returns a formatted MCP response object using `createContentResponse` or `createErrorResponse`.
|
||||
- Typically called *internally* by `executeMCPToolAction`.
|
||||
- ✅ **DO**: Call this from the MCP tool's `execute` method after receiving the result from the `*Direct` function wrapper.
|
||||
- Takes the standard `{ success, data/error, fromCache }` object.
|
||||
- Formats the standard MCP success or error response, including the `fromCache` flag.
|
||||
- Uses `processMCPResponseData` by default to filter response data.
|
||||
|
||||
- **`executeTaskMasterCommand(command, log, args, projectRootRaw)`**:
|
||||
- Executes a Task Master command using `child_process.spawnSync`.
|
||||
- Tries the global `task-master` command first, then falls back to `node scripts/dev.js`.
|
||||
- Handles project root normalization internally.
|
||||
- Returns `{ success, stdout, stderr }` or `{ success: false, error }`.
|
||||
- ❌ **DON'T**: Use this as the primary method for MCP tools. Prefer `executeMCPToolAction` with direct function calls. Use only as a fallback for commands not yet refactored or those requiring CLI execution.
|
||||
- Executes a Task Master CLI command as a child process.
|
||||
- Handles fallback between global `task-master` and local `node scripts/dev.js`.
|
||||
- ❌ **DON'T**: Use this as the primary method for MCP tools. Prefer direct function calls via `*Direct` wrappers.
|
||||
|
||||
- **`processMCPResponseData(taskOrData, fieldsToRemove = ['details', 'testStrategy'])`**:
|
||||
- Filters task data before sending it to the MCP client.
|
||||
- By default, removes the `details` and `testStrategy` fields from task objects and their subtasks to reduce payload size.
|
||||
- Can handle single task objects or data structures containing a `tasks` array (like from `listTasks`).
|
||||
- This is the default processor used by `executeMCPToolAction`.
|
||||
- **`processMCPResponseData(taskOrData, fieldsToRemove)`**:
|
||||
- Filters task data (e.g., removing `details`, `testStrategy`) before sending to the MCP client. Called by `handleApiResult`.
|
||||
|
||||
```javascript
|
||||
// Example usage (typically done inside executeMCPToolAction):
|
||||
const rawResult = { success: true, data: { tasks: [ { id: 1, title: '...', details: '...', subtasks: [...] } ] } };
|
||||
const filteredData = processMCPResponseData(rawResult.data);
|
||||
// filteredData.tasks[0] will NOT have the 'details' field.
|
||||
```
|
||||
|
||||
- **`createContentResponse(content)`**:
|
||||
- ✅ **DO**: Use this (usually via `handleApiResult` or `executeMCPToolAction`) to format successful MCP responses.
|
||||
- Wraps the `content` (stringifies objects to JSON) in the standard FastMCP `{ content: [{ type: "text", text: ... }] }` structure.
|
||||
|
||||
- **`createErrorResponse(errorMessage)`**:
|
||||
- ✅ **DO**: Use this (usually via `handleApiResult` or `executeMCPToolAction`) to format error responses for MCP.
|
||||
- Wraps the `errorMessage` in the standard FastMCP error structure, including `isError: true`.
|
||||
- **`createContentResponse(content)` / `createErrorResponse(errorMessage)`**:
|
||||
- Formatters for standard MCP success/error responses.
|
||||
|
||||
- **`getCachedOrExecute({ cacheKey, actionFn, log })`**:
|
||||
- ✅ **DO**: Use this utility *inside direct function wrappers* (like `listTasksDirect` in `task-master-core.js`) to implement caching for MCP operations.
|
||||
- Checks the `ContextManager` cache using `cacheKey`.
|
||||
- If a hit occurs, returns the cached result directly.
|
||||
- If a miss occurs, it executes the provided `actionFn` (which should be an async function returning `{ success, data/error }`).
|
||||
- If `actionFn` succeeds, its result is stored in the cache under `cacheKey`.
|
||||
- Returns the result (either cached or fresh) wrapped in the standard structure `{ success, data/error, fromCache: boolean }`.
|
||||
|
||||
- **`executeMCPToolAction({ actionFn, args, log, actionName, processResult })`**:
|
||||
- Update: While this function *can* technically coordinate caching if provided a `cacheKeyGenerator`, the current preferred pattern involves implementing caching *within* the `actionFn` (the direct wrapper) using `getCachedOrExecute`. `executeMCPToolAction` primarily orchestrates the call to `actionFn` and handles processing its result (including the `fromCache` flag) via `handleApiResult`.
|
||||
|
||||
- **`handleApiResult(result, log, errorPrefix, processFunction)`**:
|
||||
- Update: Now expects the `result` object to potentially contain a `fromCache` boolean flag. If present, this flag is included in the final response payload generated by `createContentResponse` (e.g., `{ fromCache: true, data: ... }`).
|
||||
- ✅ **DO**: Use this utility *inside direct function wrappers* to implement caching.
|
||||
- Checks cache, executes `actionFn` on miss, stores result.
|
||||
- Returns standard `{ success, data/error, fromCache: boolean }`.
|
||||
|
||||
## Export Organization
|
||||
|
||||
- **Grouping Related Functions**:
|
||||
- ✅ DO: Keep utilities relevant to their location (e.g., core utils in `scripts/modules/utils.js`, MCP utils in `mcp-server/src/tools/utils.js`).
|
||||
- ✅ DO: Keep utilities relevant to their location (e.g., core CLI utils in `scripts/modules/utils.js`, MCP path utils in `mcp-server/src/core/utils/path-utils.js`, MCP tool utils in `mcp-server/src/tools/utils.js`).
|
||||
- ✅ DO: Export all utility functions in a single statement per file.
|
||||
- ✅ DO: Group related exports together.
|
||||
- ✅ DO: Export configuration constants.
|
||||
- ✅ DO: Export configuration constants (from `scripts/modules/utils.js`).
|
||||
- ❌ DON'T: Use default exports.
|
||||
- ❌ DON'T: Create circular dependencies between utility files or between utilities and the modules that use them (See [`architecture.mdc`](mdc:.cursor/rules/architecture.mdc)).
|
||||
- ❌ DON'T: Create circular dependencies (See [`architecture.mdc`](mdc:.cursor/rules/architecture.mdc)).
|
||||
|
||||
```javascript
|
||||
// ✅ DO: Organize exports logically
|
||||
// Example export from scripts/modules/utils.js
|
||||
export {
|
||||
// Configuration
|
||||
CONFIG,
|
||||
@@ -368,15 +363,31 @@ alwaysApply: false
|
||||
truncate,
|
||||
|
||||
// Task utilities
|
||||
readComplexityReport,
|
||||
findTaskInComplexityReport,
|
||||
taskExists,
|
||||
formatTaskId,
|
||||
findTaskById,
|
||||
// ... (taskExists, formatTaskId, findTaskById, etc.)
|
||||
|
||||
// Graph algorithms
|
||||
findCycles,
|
||||
};
|
||||
|
||||
// Example export from mcp-server/src/core/utils/path-utils.js
|
||||
export {
|
||||
findTasksJsonPath,
|
||||
getPackagePath,
|
||||
PROJECT_MARKERS,
|
||||
lastFoundProjectRoot // Exporting for potential direct use/reset if needed
|
||||
};
|
||||
|
||||
// Example export from mcp-server/src/tools/utils.js
|
||||
export {
|
||||
getProjectRoot,
|
||||
getProjectRootFromSession,
|
||||
handleApiResult,
|
||||
executeTaskMasterCommand,
|
||||
processMCPResponseData,
|
||||
createContentResponse,
|
||||
createErrorResponse,
|
||||
getCachedOrExecute
|
||||
};
|
||||
```
|
||||
|
||||
Refer to [`utils.js`](mdc:scripts/modules/utils.js) for implementation examples and [`new_features.mdc`](mdc:.cursor/rules/new_features.mdc) for integration guidelines. Use [`commands.mdc`](mdc:.cursor/rules/commands.mdc) for CLI integration details.
|
||||
Refer to [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) and [`architecture.mdc`](mdc:.cursor/rules/architecture.mdc) for more context on MCP server architecture and integration.
|
||||
@@ -57,7 +57,16 @@ This will prompt you for project details and set up a new project with the neces
|
||||
|
||||
### Important Notes
|
||||
|
||||
1. This package uses ES modules. Your package.json should include `"type": "module"`.
|
||||
1. **ES Modules Configuration:**
|
||||
- This project uses ES Modules (ESM) instead of CommonJS.
|
||||
- This is set via `"type": "module"` in your package.json.
|
||||
- Use `import/export` syntax instead of `require()`.
|
||||
- Files should use `.js` or `.mjs` extensions.
|
||||
- To use a CommonJS module, either:
|
||||
- Rename it with `.cjs` extension
|
||||
- Use `await import()` for dynamic imports
|
||||
- If you need CommonJS throughout your project, remove `"type": "module"` from package.json, but Task Master scripts expect ESM.
|
||||
|
||||
2. The Anthropic SDK version should be 0.39.0 or higher.
|
||||
|
||||
## Quick Start with Global Commands
|
||||
|
||||
15
README.md
15
README.md
@@ -406,6 +406,21 @@ task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>" --resea
|
||||
|
||||
Unlike the `update-task` command which replaces task information, the `update-subtask` command _appends_ new information to the existing subtask details, marking it with a timestamp. This is useful for iteratively enhancing subtasks while preserving the original content.
|
||||
|
||||
### Remove Task
|
||||
|
||||
```bash
|
||||
# Remove a task permanently
|
||||
task-master remove-task --id=<id>
|
||||
|
||||
# Remove a subtask permanently
|
||||
task-master remove-task --id=<parentId.subtaskId>
|
||||
|
||||
# Skip the confirmation prompt
|
||||
task-master remove-task --id=<id> --yes
|
||||
```
|
||||
|
||||
The `remove-task` command permanently deletes a task or subtask from `tasks.json`. It also automatically cleans up any references to the deleted task in other tasks' dependencies. Consider using 'blocked', 'cancelled', or 'deferred' status instead if you want to keep the task for reference.
|
||||
|
||||
### Generate Task Files
|
||||
|
||||
```bash
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
# Required
|
||||
ANTHROPIC_API_KEY=your-api-key-here # Format: sk-ant-api03-...
|
||||
PERPLEXITY_API_KEY=pplx-abcde # For research (recommended but optional)
|
||||
ANTHROPIC_API_KEY=your-api-key-here # For most AI ops -- Format: sk-ant-api03-... (Required)
|
||||
PERPLEXITY_API_KEY=pplx-abcde # For research -- Format: pplx-abcde (Optional, Highly Recommended)
|
||||
|
||||
# Optional - defaults shown
|
||||
MODEL=claude-3-7-sonnet-20250219 # Recommended models: claude-3-7-sonnet-20250219, claude-3-opus-20240229
|
||||
PERPLEXITY_MODEL=sonar-pro # Make sure you have access to sonar-pro otherwise you can use sonar regular.
|
||||
MAX_TOKENS=4000 # Maximum tokens for model responses
|
||||
TEMPERATURE=0.7 # Temperature for model responses (0.0-1.0)
|
||||
MODEL=claude-3-7-sonnet-20250219 # Recommended models: claude-3-7-sonnet-20250219, claude-3-opus-20240229 (Required)
|
||||
PERPLEXITY_MODEL=sonar-pro # Make sure you have access to sonar-pro otherwise you can use sonar regular (Optional)
|
||||
MAX_TOKENS=64000 # Maximum tokens for model responses (Required)
|
||||
TEMPERATURE=0.2 # Temperature for model responses (0.0-1.0) - lower = less creativity and follow your prompt closely (Required)
|
||||
DEBUG=false # Enable debug logging (true/false)
|
||||
LOG_LEVEL=info # Log level (debug, info, warn, error)
|
||||
DEFAULT_SUBTASKS=3 # Default number of subtasks when expanding
|
||||
DEFAULT_SUBTASKS=5 # Default number of subtasks when expanding
|
||||
DEFAULT_PRIORITY=medium # Default priority for generated tasks (high, medium, low)
|
||||
PROJECT_NAME={{projectName}} # Project name for tasks.json metadata
|
||||
258
docs/ai-client-utils-example.md
Normal file
258
docs/ai-client-utils-example.md
Normal file
@@ -0,0 +1,258 @@
|
||||
# AI Client Utilities for MCP Tools
|
||||
|
||||
This document provides examples of how to use the new AI client utilities with AsyncOperationManager in MCP tools.
|
||||
|
||||
## Basic Usage with Direct Functions
|
||||
|
||||
```javascript
|
||||
// In your direct function implementation:
|
||||
import {
|
||||
getAnthropicClientForMCP,
|
||||
getModelConfig,
|
||||
handleClaudeError
|
||||
} from '../utils/ai-client-utils.js';
|
||||
|
||||
export async function someAiOperationDirect(args, log, context) {
|
||||
try {
|
||||
// Initialize Anthropic client with session from context
|
||||
const client = getAnthropicClientForMCP(context.session, log);
|
||||
|
||||
// Get model configuration with defaults or session overrides
|
||||
const modelConfig = getModelConfig(context.session);
|
||||
|
||||
// Make API call with proper error handling
|
||||
try {
|
||||
const response = await client.messages.create({
|
||||
model: modelConfig.model,
|
||||
max_tokens: modelConfig.maxTokens,
|
||||
temperature: modelConfig.temperature,
|
||||
messages: [
|
||||
{ role: 'user', content: 'Your prompt here' }
|
||||
]
|
||||
});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: response
|
||||
};
|
||||
} catch (apiError) {
|
||||
// Use helper to get user-friendly error message
|
||||
const friendlyMessage = handleClaudeError(apiError);
|
||||
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'AI_API_ERROR',
|
||||
message: friendlyMessage
|
||||
}
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
// Handle client initialization errors
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'AI_CLIENT_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Integration with AsyncOperationManager
|
||||
|
||||
```javascript
|
||||
// In your MCP tool implementation:
|
||||
import { AsyncOperationManager, StatusCodes } from '../../utils/async-operation-manager.js';
|
||||
import { someAiOperationDirect } from '../../core/direct-functions/some-ai-operation.js';
|
||||
|
||||
export async function someAiOperation(args, context) {
|
||||
const { session, mcpLog } = context;
|
||||
const log = mcpLog || console;
|
||||
|
||||
try {
|
||||
// Create operation description
|
||||
const operationDescription = `AI operation: ${args.someParam}`;
|
||||
|
||||
// Start async operation
|
||||
const operation = AsyncOperationManager.createOperation(
|
||||
operationDescription,
|
||||
async (reportProgress) => {
|
||||
try {
|
||||
// Initial progress report
|
||||
reportProgress({
|
||||
progress: 0,
|
||||
status: 'Starting AI operation...'
|
||||
});
|
||||
|
||||
// Call direct function with session and progress reporting
|
||||
const result = await someAiOperationDirect(
|
||||
args,
|
||||
log,
|
||||
{
|
||||
reportProgress,
|
||||
mcpLog: log,
|
||||
session
|
||||
}
|
||||
);
|
||||
|
||||
// Final progress update
|
||||
reportProgress({
|
||||
progress: 100,
|
||||
status: result.success ? 'Operation completed' : 'Operation failed',
|
||||
result: result.data,
|
||||
error: result.error
|
||||
});
|
||||
|
||||
return result;
|
||||
} catch (error) {
|
||||
// Handle errors in the operation
|
||||
reportProgress({
|
||||
progress: 100,
|
||||
status: 'Operation failed',
|
||||
error: {
|
||||
message: error.message,
|
||||
code: error.code || 'OPERATION_FAILED'
|
||||
}
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
// Return immediate response with operation ID
|
||||
return {
|
||||
status: StatusCodes.ACCEPTED,
|
||||
body: {
|
||||
success: true,
|
||||
message: 'Operation started',
|
||||
operationId: operation.id
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Handle errors in the MCP tool
|
||||
log.error(`Error in someAiOperation: ${error.message}`);
|
||||
return {
|
||||
status: StatusCodes.INTERNAL_SERVER_ERROR,
|
||||
body: {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'OPERATION_FAILED',
|
||||
message: error.message
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Using Research Capabilities with Perplexity
|
||||
|
||||
```javascript
|
||||
// In your direct function:
|
||||
import {
|
||||
getPerplexityClientForMCP,
|
||||
getBestAvailableAIModel
|
||||
} from '../utils/ai-client-utils.js';
|
||||
|
||||
export async function researchOperationDirect(args, log, context) {
|
||||
try {
|
||||
// Get the best AI model for this operation based on needs
|
||||
const { type, client } = await getBestAvailableAIModel(
|
||||
context.session,
|
||||
{ requiresResearch: true },
|
||||
log
|
||||
);
|
||||
|
||||
// Report which model we're using
|
||||
if (context.reportProgress) {
|
||||
await context.reportProgress({
|
||||
progress: 10,
|
||||
status: `Using ${type} model for research...`
|
||||
});
|
||||
}
|
||||
|
||||
// Make API call based on the model type
|
||||
if (type === 'perplexity') {
|
||||
// Call Perplexity
|
||||
const response = await client.chat.completions.create({
|
||||
model: context.session?.env?.PERPLEXITY_MODEL || 'sonar-medium-online',
|
||||
messages: [
|
||||
{ role: 'user', content: args.researchQuery }
|
||||
],
|
||||
temperature: 0.1
|
||||
});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: response.choices[0].message.content
|
||||
};
|
||||
} else {
|
||||
// Call Claude as fallback
|
||||
// (Implementation depends on specific needs)
|
||||
// ...
|
||||
}
|
||||
} catch (error) {
|
||||
// Handle errors
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'RESEARCH_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Model Configuration Override Example
|
||||
|
||||
```javascript
|
||||
// In your direct function:
|
||||
import { getModelConfig } from '../utils/ai-client-utils.js';
|
||||
|
||||
// Using custom defaults for a specific operation
|
||||
const operationDefaults = {
|
||||
model: 'claude-3-haiku-20240307', // Faster, smaller model
|
||||
maxTokens: 1000, // Lower token limit
|
||||
temperature: 0.2 // Lower temperature for more deterministic output
|
||||
};
|
||||
|
||||
// Get model config with operation-specific defaults
|
||||
const modelConfig = getModelConfig(context.session, operationDefaults);
|
||||
|
||||
// Now use modelConfig in your API calls
|
||||
const response = await client.messages.create({
|
||||
model: modelConfig.model,
|
||||
max_tokens: modelConfig.maxTokens,
|
||||
temperature: modelConfig.temperature,
|
||||
// Other parameters...
|
||||
});
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Error Handling**:
|
||||
- Always use try/catch blocks around both client initialization and API calls
|
||||
- Use `handleClaudeError` to provide user-friendly error messages
|
||||
- Return standardized error objects with code and message
|
||||
|
||||
2. **Progress Reporting**:
|
||||
- Report progress at key points (starting, processing, completing)
|
||||
- Include meaningful status messages
|
||||
- Include error details in progress reports when failures occur
|
||||
|
||||
3. **Session Handling**:
|
||||
- Always pass the session from the context to the AI client getters
|
||||
- Use `getModelConfig` to respect user settings from session
|
||||
|
||||
4. **Model Selection**:
|
||||
- Use `getBestAvailableAIModel` when you need to select between different models
|
||||
- Set `requiresResearch: true` when you need Perplexity capabilities
|
||||
|
||||
5. **AsyncOperationManager Integration**:
|
||||
- Create descriptive operation names
|
||||
- Handle all errors within the operation function
|
||||
- Return standardized results from direct functions
|
||||
- Return immediate responses with operation IDs
|
||||
1179
docs/fastmcp-core.txt
Normal file
1179
docs/fastmcp-core.txt
Normal file
File diff suppressed because it is too large
Load Diff
41
entries.json
Normal file
41
entries.json
Normal file
@@ -0,0 +1,41 @@
|
||||
import os
|
||||
import json
|
||||
|
||||
# Path to Cursor's history folder
|
||||
history_path = os.path.expanduser('~/Library/Application Support/Cursor/User/History')
|
||||
|
||||
# File to search for
|
||||
target_file = 'tasks/tasks.json'
|
||||
|
||||
# Function to search through all entries.json files
|
||||
def search_entries_for_file(history_path, target_file):
|
||||
matching_folders = []
|
||||
for folder in os.listdir(history_path):
|
||||
folder_path = os.path.join(history_path, folder)
|
||||
if not os.path.isdir(folder_path):
|
||||
continue
|
||||
|
||||
# Look for entries.json
|
||||
entries_file = os.path.join(folder_path, 'entries.json')
|
||||
if not os.path.exists(entries_file):
|
||||
continue
|
||||
|
||||
# Parse entries.json to find the resource key
|
||||
with open(entries_file, 'r') as f:
|
||||
data = json.load(f)
|
||||
resource = data.get('resource', None)
|
||||
if resource and target_file in resource:
|
||||
matching_folders.append(folder_path)
|
||||
|
||||
return matching_folders
|
||||
|
||||
# Search for the target file
|
||||
matching_folders = search_entries_for_file(history_path, target_file)
|
||||
|
||||
# Output the matching folders
|
||||
if matching_folders:
|
||||
print(f"Found {target_file} in the following folders:")
|
||||
for folder in matching_folders:
|
||||
print(folder)
|
||||
else:
|
||||
print(f"No matches found for {target_file}.")
|
||||
85
mcp-server/src/core/direct-functions/add-dependency.js
Normal file
85
mcp-server/src/core/direct-functions/add-dependency.js
Normal file
@@ -0,0 +1,85 @@
|
||||
/**
|
||||
* add-dependency.js
|
||||
* Direct function implementation for adding a dependency to a task
|
||||
*/
|
||||
|
||||
import { addDependency } from '../../../../scripts/modules/dependency-manager.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for addDependency with error handling.
|
||||
*
|
||||
* @param {Object} args - Command arguments
|
||||
* @param {string|number} args.id - Task ID to add dependency to
|
||||
* @param {string|number} args.dependsOn - Task ID that will become a dependency
|
||||
* @param {string} [args.file] - Path to the tasks file
|
||||
* @param {string} [args.projectRoot] - Project root directory
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<Object>} - Result object with success status and data/error information
|
||||
*/
|
||||
export async function addDependencyDirect(args, log) {
|
||||
try {
|
||||
log.info(`Adding dependency with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Validate required parameters
|
||||
if (!args.id) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INPUT_VALIDATION_ERROR',
|
||||
message: 'Task ID (id) is required'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
if (!args.dependsOn) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INPUT_VALIDATION_ERROR',
|
||||
message: 'Dependency ID (dependsOn) is required'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Find the tasks.json path
|
||||
const tasksPath = findTasksJsonPath(args, log);
|
||||
|
||||
// Format IDs for the core function
|
||||
const taskId = args.id.includes && args.id.includes('.') ? args.id : parseInt(args.id, 10);
|
||||
const dependencyId = args.dependsOn.includes && args.dependsOn.includes('.') ? args.dependsOn : parseInt(args.dependsOn, 10);
|
||||
|
||||
log.info(`Adding dependency: task ${taskId} will depend on ${dependencyId}`);
|
||||
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Call the core function
|
||||
await addDependency(tasksPath, taskId, dependencyId);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Successfully added dependency: Task ${taskId} now depends on ${dependencyId}`,
|
||||
taskId: taskId,
|
||||
dependencyId: dependencyId
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error in addDependencyDirect: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CORE_FUNCTION_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
128
mcp-server/src/core/direct-functions/add-subtask.js
Normal file
128
mcp-server/src/core/direct-functions/add-subtask.js
Normal file
@@ -0,0 +1,128 @@
|
||||
/**
|
||||
* Direct function wrapper for addSubtask
|
||||
*/
|
||||
|
||||
import { addSubtask } from '../../../../scripts/modules/task-manager.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
|
||||
/**
|
||||
* Add a subtask to an existing task
|
||||
* @param {Object} args - Function arguments
|
||||
* @param {string} args.id - Parent task ID
|
||||
* @param {string} [args.taskId] - Existing task ID to convert to subtask (optional)
|
||||
* @param {string} [args.title] - Title for new subtask (when creating a new subtask)
|
||||
* @param {string} [args.description] - Description for new subtask
|
||||
* @param {string} [args.details] - Implementation details for new subtask
|
||||
* @param {string} [args.status] - Status for new subtask (default: 'pending')
|
||||
* @param {string} [args.dependencies] - Comma-separated list of dependency IDs
|
||||
* @param {string} [args.file] - Path to the tasks file
|
||||
* @param {boolean} [args.skipGenerate] - Skip regenerating task files
|
||||
* @param {string} [args.projectRoot] - Project root directory
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<{success: boolean, data?: Object, error?: string}>}
|
||||
*/
|
||||
export async function addSubtaskDirect(args, log) {
|
||||
try {
|
||||
log.info(`Adding subtask with args: ${JSON.stringify(args)}`);
|
||||
|
||||
if (!args.id) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INPUT_VALIDATION_ERROR',
|
||||
message: 'Parent task ID is required'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Either taskId or title must be provided
|
||||
if (!args.taskId && !args.title) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INPUT_VALIDATION_ERROR',
|
||||
message: 'Either taskId or title must be provided'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Find the tasks.json path
|
||||
const tasksPath = findTasksJsonPath(args, log);
|
||||
|
||||
// Parse dependencies if provided
|
||||
let dependencies = [];
|
||||
if (args.dependencies) {
|
||||
dependencies = args.dependencies.split(',').map(id => {
|
||||
// Handle both regular IDs and dot notation
|
||||
return id.includes('.') ? id.trim() : parseInt(id.trim(), 10);
|
||||
});
|
||||
}
|
||||
|
||||
// Convert existingTaskId to a number if provided
|
||||
const existingTaskId = args.taskId ? parseInt(args.taskId, 10) : null;
|
||||
|
||||
// Convert parent ID to a number
|
||||
const parentId = parseInt(args.id, 10);
|
||||
|
||||
// Determine if we should generate files
|
||||
const generateFiles = !args.skipGenerate;
|
||||
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Case 1: Convert existing task to subtask
|
||||
if (existingTaskId) {
|
||||
log.info(`Converting task ${existingTaskId} to a subtask of ${parentId}`);
|
||||
const result = await addSubtask(tasksPath, parentId, existingTaskId, null, generateFiles);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Task ${existingTaskId} successfully converted to a subtask of task ${parentId}`,
|
||||
subtask: result
|
||||
}
|
||||
};
|
||||
}
|
||||
// Case 2: Create new subtask
|
||||
else {
|
||||
log.info(`Creating new subtask for parent task ${parentId}`);
|
||||
|
||||
const newSubtaskData = {
|
||||
title: args.title,
|
||||
description: args.description || '',
|
||||
details: args.details || '',
|
||||
status: args.status || 'pending',
|
||||
dependencies: dependencies
|
||||
};
|
||||
|
||||
const result = await addSubtask(tasksPath, parentId, null, newSubtaskData, generateFiles);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `New subtask ${parentId}.${result.id} successfully created`,
|
||||
subtask: result
|
||||
}
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error in addSubtaskDirect: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CORE_FUNCTION_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
84
mcp-server/src/core/direct-functions/add-task.js
Normal file
84
mcp-server/src/core/direct-functions/add-task.js
Normal file
@@ -0,0 +1,84 @@
|
||||
/**
|
||||
* add-task.js
|
||||
* Direct function implementation for adding a new task
|
||||
*/
|
||||
|
||||
import { addTask } from '../../../../scripts/modules/task-manager.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for adding a new task with error handling.
|
||||
*
|
||||
* @param {Object} args - Command arguments
|
||||
* @param {string} args.prompt - Description of the task to add
|
||||
* @param {Array<number>} [args.dependencies=[]] - Task dependencies as array of IDs
|
||||
* @param {string} [args.priority='medium'] - Task priority (high, medium, low)
|
||||
* @param {string} [args.file] - Path to the tasks file
|
||||
* @param {string} [args.projectRoot] - Project root directory
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }
|
||||
*/
|
||||
export async function addTaskDirect(args, log) {
|
||||
try {
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Find the tasks.json path
|
||||
const tasksPath = findTasksJsonPath(args, log);
|
||||
|
||||
// Check required parameters
|
||||
if (!args.prompt) {
|
||||
log.error('Missing required parameter: prompt');
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'MISSING_PARAMETER',
|
||||
message: 'The prompt parameter is required for adding a task'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Extract and prepare parameters
|
||||
const prompt = args.prompt;
|
||||
const dependencies = Array.isArray(args.dependencies)
|
||||
? args.dependencies
|
||||
: (args.dependencies ? String(args.dependencies).split(',').map(id => parseInt(id.trim(), 10)) : []);
|
||||
const priority = args.priority || 'medium';
|
||||
|
||||
log.info(`Adding new task with prompt: "${prompt}", dependencies: [${dependencies.join(', ')}], priority: ${priority}`);
|
||||
|
||||
// Call the addTask function with 'json' outputFormat to prevent console output when called via MCP
|
||||
const newTaskId = await addTask(
|
||||
tasksPath,
|
||||
prompt,
|
||||
dependencies,
|
||||
priority,
|
||||
{ mcpLog: log },
|
||||
'json'
|
||||
);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
taskId: newTaskId,
|
||||
message: `Successfully added new task #${newTaskId}`
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error in addTaskDirect: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'ADD_TASK_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
101
mcp-server/src/core/direct-functions/analyze-task-complexity.js
Normal file
101
mcp-server/src/core/direct-functions/analyze-task-complexity.js
Normal file
@@ -0,0 +1,101 @@
|
||||
/**
|
||||
* Direct function wrapper for analyzeTaskComplexity
|
||||
*/
|
||||
|
||||
import { analyzeTaskComplexity } from '../../../../scripts/modules/task-manager.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
|
||||
/**
|
||||
* Analyze task complexity and generate recommendations
|
||||
* @param {Object} args - Function arguments
|
||||
* @param {string} [args.file] - Path to the tasks file
|
||||
* @param {string} [args.output] - Output file path for the report
|
||||
* @param {string} [args.model] - LLM model to use for analysis
|
||||
* @param {string|number} [args.threshold] - Minimum complexity score to recommend expansion (1-10)
|
||||
* @param {boolean} [args.research] - Use Perplexity AI for research-backed complexity analysis
|
||||
* @param {string} [args.projectRoot] - Project root directory
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
|
||||
*/
|
||||
export async function analyzeTaskComplexityDirect(args, log) {
|
||||
try {
|
||||
log.info(`Analyzing task complexity with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Find the tasks.json path
|
||||
const tasksPath = findTasksJsonPath(args, log);
|
||||
|
||||
// Determine output path
|
||||
let outputPath = args.output || 'scripts/task-complexity-report.json';
|
||||
if (!path.isAbsolute(outputPath) && args.projectRoot) {
|
||||
outputPath = path.join(args.projectRoot, outputPath);
|
||||
}
|
||||
|
||||
// Create options object for analyzeTaskComplexity
|
||||
const options = {
|
||||
file: tasksPath,
|
||||
output: outputPath,
|
||||
model: args.model,
|
||||
threshold: args.threshold,
|
||||
research: args.research === true
|
||||
};
|
||||
|
||||
log.info(`Analyzing task complexity from: ${tasksPath}`);
|
||||
log.info(`Output report will be saved to: ${outputPath}`);
|
||||
|
||||
if (options.research) {
|
||||
log.info('Using Perplexity AI for research-backed complexity analysis');
|
||||
}
|
||||
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Call the core function
|
||||
await analyzeTaskComplexity(options);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
// Verify the report file was created
|
||||
if (!fs.existsSync(outputPath)) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'ANALYZE_ERROR',
|
||||
message: 'Analysis completed but no report file was created'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Read the report file
|
||||
const report = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Task complexity analysis complete. Report saved to ${outputPath}`,
|
||||
reportPath: outputPath,
|
||||
reportSummary: {
|
||||
taskCount: report.length,
|
||||
highComplexityTasks: report.filter(t => t.complexityScore >= 8).length,
|
||||
mediumComplexityTasks: report.filter(t => t.complexityScore >= 5 && t.complexityScore < 8).length,
|
||||
lowComplexityTasks: report.filter(t => t.complexityScore < 5).length,
|
||||
}
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error in analyzeTaskComplexityDirect: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CORE_FUNCTION_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
32
mcp-server/src/core/direct-functions/cache-stats.js
Normal file
32
mcp-server/src/core/direct-functions/cache-stats.js
Normal file
@@ -0,0 +1,32 @@
|
||||
/**
|
||||
* cache-stats.js
|
||||
* Direct function implementation for retrieving cache statistics
|
||||
*/
|
||||
|
||||
import { contextManager } from '../context-manager.js';
|
||||
|
||||
/**
|
||||
* Get cache statistics for monitoring
|
||||
* @param {Object} args - Command arguments
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Object} - Cache statistics
|
||||
*/
|
||||
export async function getCacheStatsDirect(args, log) {
|
||||
try {
|
||||
log.info('Retrieving cache statistics');
|
||||
const stats = contextManager.getStats();
|
||||
return {
|
||||
success: true,
|
||||
data: stats
|
||||
};
|
||||
} catch (error) {
|
||||
log.error(`Error getting cache stats: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CACHE_STATS_ERROR',
|
||||
message: error.message || 'Unknown error occurred'
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
112
mcp-server/src/core/direct-functions/clear-subtasks.js
Normal file
112
mcp-server/src/core/direct-functions/clear-subtasks.js
Normal file
@@ -0,0 +1,112 @@
|
||||
/**
|
||||
* Direct function wrapper for clearSubtasks
|
||||
*/
|
||||
|
||||
import { clearSubtasks } from '../../../../scripts/modules/task-manager.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
import fs from 'fs';
|
||||
|
||||
/**
|
||||
* Clear subtasks from specified tasks
|
||||
* @param {Object} args - Function arguments
|
||||
* @param {string} [args.id] - Task IDs (comma-separated) to clear subtasks from
|
||||
* @param {boolean} [args.all] - Clear subtasks from all tasks
|
||||
* @param {string} [args.file] - Path to the tasks file
|
||||
* @param {string} [args.projectRoot] - Project root directory
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
|
||||
*/
|
||||
export async function clearSubtasksDirect(args, log) {
|
||||
try {
|
||||
log.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Either id or all must be provided
|
||||
if (!args.id && !args.all) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INPUT_VALIDATION_ERROR',
|
||||
message: 'Either task IDs with id parameter or all parameter must be provided'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Find the tasks.json path
|
||||
const tasksPath = findTasksJsonPath(args, log);
|
||||
|
||||
// Check if tasks.json exists
|
||||
if (!fs.existsSync(tasksPath)) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'FILE_NOT_FOUND_ERROR',
|
||||
message: `Tasks file not found at ${tasksPath}`
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
let taskIds;
|
||||
|
||||
// If all is specified, get all task IDs
|
||||
if (args.all) {
|
||||
log.info('Clearing subtasks from all tasks');
|
||||
const data = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
|
||||
if (!data || !data.tasks || data.tasks.length === 0) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INPUT_VALIDATION_ERROR',
|
||||
message: 'No valid tasks found in the tasks file'
|
||||
}
|
||||
};
|
||||
}
|
||||
taskIds = data.tasks.map(t => t.id).join(',');
|
||||
} else {
|
||||
// Use the provided task IDs
|
||||
taskIds = args.id;
|
||||
}
|
||||
|
||||
log.info(`Clearing subtasks from tasks: ${taskIds}`);
|
||||
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Call the core function
|
||||
clearSubtasks(tasksPath, taskIds);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
// Read the updated data to provide a summary
|
||||
const updatedData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
|
||||
const taskIdArray = taskIds.split(',').map(id => parseInt(id.trim(), 10));
|
||||
|
||||
// Build a summary of what was done
|
||||
const clearedTasksCount = taskIdArray.length;
|
||||
const taskSummary = taskIdArray.map(id => {
|
||||
const task = updatedData.tasks.find(t => t.id === id);
|
||||
return task ? { id, title: task.title } : { id, title: 'Task not found' };
|
||||
});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Successfully cleared subtasks from ${clearedTasksCount} task(s)`,
|
||||
tasksCleared: taskSummary
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error in clearSubtasksDirect: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CORE_FUNCTION_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
121
mcp-server/src/core/direct-functions/complexity-report.js
Normal file
121
mcp-server/src/core/direct-functions/complexity-report.js
Normal file
@@ -0,0 +1,121 @@
|
||||
/**
|
||||
* complexity-report.js
|
||||
* Direct function implementation for displaying complexity analysis report
|
||||
*/
|
||||
|
||||
import { readComplexityReport, enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { getCachedOrExecute } from '../../tools/utils.js';
|
||||
import path from 'path';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for displaying the complexity report with error handling and caching.
|
||||
*
|
||||
* @param {Object} args - Command arguments containing file path option
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<Object>} - Result object with success status and data/error information
|
||||
*/
|
||||
export async function complexityReportDirect(args, log) {
|
||||
try {
|
||||
log.info(`Getting complexity report with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Get tasks file path to determine project root for the default report location
|
||||
let tasksPath;
|
||||
try {
|
||||
tasksPath = findTasksJsonPath(args, log);
|
||||
} catch (error) {
|
||||
log.warn(`Tasks file not found, using current directory: ${error.message}`);
|
||||
// Continue with default or specified report path
|
||||
}
|
||||
|
||||
// Get report file path from args or use default
|
||||
const reportPath = args.file || path.join(process.cwd(), 'scripts', 'task-complexity-report.json');
|
||||
|
||||
log.info(`Looking for complexity report at: ${reportPath}`);
|
||||
|
||||
// Generate cache key based on report path
|
||||
const cacheKey = `complexityReport:${reportPath}`;
|
||||
|
||||
// Define the core action function to read the report
|
||||
const coreActionFn = async () => {
|
||||
try {
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
const report = readComplexityReport(reportPath);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
if (!report) {
|
||||
log.warn(`No complexity report found at ${reportPath}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'FILE_NOT_FOUND_ERROR',
|
||||
message: `No complexity report found at ${reportPath}. Run 'analyze-complexity' first.`
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
report,
|
||||
reportPath
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error reading complexity report: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'READ_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
// Use the caching utility
|
||||
try {
|
||||
const result = await getCachedOrExecute({
|
||||
cacheKey,
|
||||
actionFn: coreActionFn,
|
||||
log
|
||||
});
|
||||
log.info(`complexityReportDirect completed. From cache: ${result.fromCache}`);
|
||||
return result; // Returns { success, data/error, fromCache }
|
||||
} catch (error) {
|
||||
// Catch unexpected errors from getCachedOrExecute itself
|
||||
// Ensure silent mode is disabled
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Unexpected error during getCachedOrExecute for complexityReport: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'UNEXPECTED_ERROR',
|
||||
message: error.message
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
// Ensure silent mode is disabled if an outer error occurs
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error in complexityReportDirect: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'UNEXPECTED_ERROR',
|
||||
message: error.message
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
}
|
||||
86
mcp-server/src/core/direct-functions/expand-all-tasks.js
Normal file
86
mcp-server/src/core/direct-functions/expand-all-tasks.js
Normal file
@@ -0,0 +1,86 @@
|
||||
/**
|
||||
* Direct function wrapper for expandAllTasks
|
||||
*/
|
||||
|
||||
import { expandAllTasks } from '../../../../scripts/modules/task-manager.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
|
||||
/**
|
||||
* Expand all pending tasks with subtasks
|
||||
* @param {Object} args - Function arguments
|
||||
* @param {number|string} [args.num] - Number of subtasks to generate
|
||||
* @param {boolean} [args.research] - Enable Perplexity AI for research-backed subtask generation
|
||||
* @param {string} [args.prompt] - Additional context to guide subtask generation
|
||||
* @param {boolean} [args.force] - Force regeneration of subtasks for tasks that already have them
|
||||
* @param {string} [args.file] - Path to the tasks file
|
||||
* @param {string} [args.projectRoot] - Project root directory
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
|
||||
*/
|
||||
export async function expandAllTasksDirect(args, log) {
|
||||
try {
|
||||
log.info(`Expanding all tasks with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Find the tasks.json path
|
||||
const tasksPath = findTasksJsonPath(args, log);
|
||||
|
||||
// Parse parameters
|
||||
const numSubtasks = args.num ? parseInt(args.num, 10) : undefined;
|
||||
const useResearch = args.research === true;
|
||||
const additionalContext = args.prompt || '';
|
||||
const forceFlag = args.force === true;
|
||||
|
||||
log.info(`Expanding all tasks with ${numSubtasks || 'default'} subtasks each...`);
|
||||
if (useResearch) {
|
||||
log.info('Using Perplexity AI for research-backed subtask generation');
|
||||
}
|
||||
if (additionalContext) {
|
||||
log.info(`Additional context: "${additionalContext}"`);
|
||||
}
|
||||
if (forceFlag) {
|
||||
log.info('Force regeneration of subtasks is enabled');
|
||||
}
|
||||
|
||||
try {
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Call the core function
|
||||
await expandAllTasks(numSubtasks, useResearch, additionalContext, forceFlag);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
// The expandAllTasks function doesn't have a return value, so we'll create our own success response
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: "Successfully expanded all pending tasks with subtasks",
|
||||
details: {
|
||||
numSubtasks: numSubtasks,
|
||||
research: useResearch,
|
||||
prompt: additionalContext,
|
||||
force: forceFlag
|
||||
}
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
throw error; // Rethrow to be caught by outer catch block
|
||||
}
|
||||
} catch (error) {
|
||||
// Ensure silent mode is disabled
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error in expandAllTasksDirect: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CORE_FUNCTION_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
174
mcp-server/src/core/direct-functions/expand-task.js
Normal file
174
mcp-server/src/core/direct-functions/expand-task.js
Normal file
@@ -0,0 +1,174 @@
|
||||
/**
|
||||
* expand-task.js
|
||||
* Direct function implementation for expanding a task into subtasks
|
||||
*/
|
||||
|
||||
import { expandTask } from '../../../../scripts/modules/task-manager.js';
|
||||
import { readJSON, writeJSON, enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import path from 'path';
|
||||
import fs from 'fs';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for expanding a task into subtasks with error handling.
|
||||
*
|
||||
* @param {Object} args - Command arguments
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<Object>} - Task expansion result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
|
||||
*/
|
||||
export async function expandTaskDirect(args, log) {
|
||||
let tasksPath;
|
||||
try {
|
||||
// Find the tasks path first
|
||||
tasksPath = findTasksJsonPath(args, log);
|
||||
} catch (error) {
|
||||
log.error(`Tasks file not found: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'FILE_NOT_FOUND_ERROR',
|
||||
message: error.message
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Validate task ID
|
||||
const taskId = args.id ? parseInt(args.id, 10) : null;
|
||||
if (!taskId) {
|
||||
log.error('Task ID is required');
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INPUT_VALIDATION_ERROR',
|
||||
message: 'Task ID is required'
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Process other parameters
|
||||
const numSubtasks = args.num ? parseInt(args.num, 10) : undefined;
|
||||
const useResearch = args.research === true;
|
||||
const additionalContext = args.prompt || '';
|
||||
const force = args.force === true;
|
||||
|
||||
try {
|
||||
log.info(`Expanding task ${taskId} into ${numSubtasks || 'default'} subtasks. Research: ${useResearch}, Force: ${force}`);
|
||||
|
||||
// Read tasks data
|
||||
const data = readJSON(tasksPath);
|
||||
if (!data || !data.tasks) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INVALID_TASKS_FILE',
|
||||
message: `No valid tasks found in ${tasksPath}`
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Find the specific task
|
||||
const task = data.tasks.find(t => t.id === taskId);
|
||||
|
||||
if (!task) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'TASK_NOT_FOUND',
|
||||
message: `Task with ID ${taskId} not found`
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Check if task is completed
|
||||
if (task.status === 'done' || task.status === 'completed') {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'TASK_COMPLETED',
|
||||
message: `Task ${taskId} is already marked as ${task.status} and cannot be expanded`
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Check for existing subtasks
|
||||
const hasExistingSubtasks = task.subtasks && task.subtasks.length > 0;
|
||||
|
||||
// Keep a copy of the task before modification
|
||||
const originalTask = JSON.parse(JSON.stringify(task));
|
||||
|
||||
// Tracking subtasks count before expansion
|
||||
const subtasksCountBefore = task.subtasks ? task.subtasks.length : 0;
|
||||
|
||||
// Create a backup of the tasks.json file
|
||||
const backupPath = path.join(path.dirname(tasksPath), 'tasks.json.bak');
|
||||
fs.copyFileSync(tasksPath, backupPath);
|
||||
|
||||
// Directly modify the data instead of calling the CLI function
|
||||
if (!task.subtasks) {
|
||||
task.subtasks = [];
|
||||
}
|
||||
|
||||
// Save tasks.json with potentially empty subtasks array
|
||||
writeJSON(tasksPath, data);
|
||||
|
||||
// Process the request
|
||||
try {
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Call expandTask
|
||||
const result = await expandTask(taskId, numSubtasks, useResearch, additionalContext);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
// Read the updated data
|
||||
const updatedData = readJSON(tasksPath);
|
||||
const updatedTask = updatedData.tasks.find(t => t.id === taskId);
|
||||
|
||||
// Calculate how many subtasks were added
|
||||
const subtasksAdded = updatedTask.subtasks ?
|
||||
updatedTask.subtasks.length - subtasksCountBefore : 0;
|
||||
|
||||
// Return the result
|
||||
log.info(`Successfully expanded task ${taskId} with ${subtasksAdded} new subtasks`);
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
task: updatedTask,
|
||||
subtasksAdded,
|
||||
hasExistingSubtasks
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error expanding task: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CORE_FUNCTION_ERROR',
|
||||
message: error.message || 'Failed to expand task'
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
log.error(`Error expanding task: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CORE_FUNCTION_ERROR',
|
||||
message: error.message || 'Failed to expand task'
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
}
|
||||
65
mcp-server/src/core/direct-functions/fix-dependencies.js
Normal file
65
mcp-server/src/core/direct-functions/fix-dependencies.js
Normal file
@@ -0,0 +1,65 @@
|
||||
/**
|
||||
* Direct function wrapper for fixDependenciesCommand
|
||||
*/
|
||||
|
||||
import { fixDependenciesCommand } from '../../../../scripts/modules/dependency-manager.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
import fs from 'fs';
|
||||
|
||||
/**
|
||||
* Fix invalid dependencies in tasks.json automatically
|
||||
* @param {Object} args - Function arguments
|
||||
* @param {string} [args.file] - Path to the tasks file
|
||||
* @param {string} [args.projectRoot] - Project root directory
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
|
||||
*/
|
||||
export async function fixDependenciesDirect(args, log) {
|
||||
try {
|
||||
log.info(`Fixing invalid dependencies in tasks...`);
|
||||
|
||||
// Find the tasks.json path
|
||||
const tasksPath = findTasksJsonPath(args, log);
|
||||
|
||||
// Verify the file exists
|
||||
if (!fs.existsSync(tasksPath)) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'FILE_NOT_FOUND',
|
||||
message: `Tasks file not found at ${tasksPath}`
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Call the original command function
|
||||
await fixDependenciesCommand(tasksPath);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: 'Dependencies fixed successfully',
|
||||
tasksPath
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error fixing dependencies: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'FIX_DEPENDENCIES_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
87
mcp-server/src/core/direct-functions/generate-task-files.js
Normal file
87
mcp-server/src/core/direct-functions/generate-task-files.js
Normal file
@@ -0,0 +1,87 @@
|
||||
/**
|
||||
* generate-task-files.js
|
||||
* Direct function implementation for generating task files from tasks.json
|
||||
*/
|
||||
|
||||
import { generateTaskFiles } from '../../../../scripts/modules/task-manager.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import path from 'path';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for generateTaskFiles with error handling.
|
||||
*
|
||||
* @param {Object} args - Command arguments containing file and output path options.
|
||||
* @param {Object} log - Logger object.
|
||||
* @returns {Promise<Object>} - Result object with success status and data/error information.
|
||||
*/
|
||||
export async function generateTaskFilesDirect(args, log) {
|
||||
try {
|
||||
log.info(`Generating task files with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Get tasks file path
|
||||
let tasksPath;
|
||||
try {
|
||||
tasksPath = findTasksJsonPath(args, log);
|
||||
} catch (error) {
|
||||
log.error(`Error finding tasks file: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'TASKS_FILE_ERROR', message: error.message },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Get output directory (defaults to the same directory as the tasks file)
|
||||
let outputDir = args.output;
|
||||
if (!outputDir) {
|
||||
outputDir = path.dirname(tasksPath);
|
||||
}
|
||||
|
||||
log.info(`Generating task files from ${tasksPath} to ${outputDir}`);
|
||||
|
||||
// Execute core generateTaskFiles function in a separate try/catch
|
||||
try {
|
||||
// Enable silent mode to prevent logs from being written to stdout
|
||||
enableSilentMode();
|
||||
|
||||
// The function is synchronous despite being awaited elsewhere
|
||||
generateTaskFiles(tasksPath, outputDir);
|
||||
|
||||
// Restore normal logging after task generation
|
||||
disableSilentMode();
|
||||
} catch (genError) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error in generateTaskFiles: ${genError.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'GENERATE_FILES_ERROR', message: genError.message },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Return success with file paths
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Successfully generated task files`,
|
||||
tasksPath,
|
||||
outputDir,
|
||||
taskFiles: 'Individual task files have been generated in the output directory'
|
||||
},
|
||||
fromCache: false // This operation always modifies state and should never be cached
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging if an outer error occurs
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error generating task files: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'GENERATE_TASKS_ERROR', message: error.message || 'Unknown error generating task files' },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
}
|
||||
83
mcp-server/src/core/direct-functions/list-tasks.js
Normal file
83
mcp-server/src/core/direct-functions/list-tasks.js
Normal file
@@ -0,0 +1,83 @@
|
||||
/**
|
||||
* list-tasks.js
|
||||
* Direct function implementation for listing tasks
|
||||
*/
|
||||
|
||||
import { listTasks } from '../../../../scripts/modules/task-manager.js';
|
||||
import { getCachedOrExecute } from '../../tools/utils.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for listTasks with error handling and caching.
|
||||
*
|
||||
* @param {Object} args - Command arguments (projectRoot is expected to be resolved).
|
||||
* @param {Object} log - Logger object.
|
||||
* @returns {Promise<Object>} - Task list result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }.
|
||||
*/
|
||||
export async function listTasksDirect(args, log) {
|
||||
let tasksPath;
|
||||
try {
|
||||
// Find the tasks path first - needed for cache key and execution
|
||||
tasksPath = findTasksJsonPath(args, log);
|
||||
} catch (error) {
|
||||
if (error.code === 'TASKS_FILE_NOT_FOUND') {
|
||||
log.error(`Tasks file not found: ${error.message}`);
|
||||
// Return the error structure expected by the calling tool/handler
|
||||
return { success: false, error: { code: error.code, message: error.message }, fromCache: false };
|
||||
}
|
||||
log.error(`Unexpected error finding tasks file: ${error.message}`);
|
||||
// Re-throw for outer catch or return structured error
|
||||
return { success: false, error: { code: 'FIND_TASKS_PATH_ERROR', message: error.message }, fromCache: false };
|
||||
}
|
||||
|
||||
// Generate cache key *after* finding tasksPath
|
||||
const statusFilter = args.status || 'all';
|
||||
const withSubtasks = args.withSubtasks || false;
|
||||
const cacheKey = `listTasks:${tasksPath}:${statusFilter}:${withSubtasks}`;
|
||||
|
||||
// Define the action function to be executed on cache miss
|
||||
const coreListTasksAction = async () => {
|
||||
try {
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
log.info(`Executing core listTasks function for path: ${tasksPath}, filter: ${statusFilter}, subtasks: ${withSubtasks}`);
|
||||
const resultData = listTasks(tasksPath, statusFilter, withSubtasks, 'json');
|
||||
|
||||
if (!resultData || !resultData.tasks) {
|
||||
log.error('Invalid or empty response from listTasks core function');
|
||||
return { success: false, error: { code: 'INVALID_CORE_RESPONSE', message: 'Invalid or empty response from listTasks core function' } };
|
||||
}
|
||||
log.info(`Core listTasks function retrieved ${resultData.tasks.length} tasks`);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
return { success: true, data: resultData };
|
||||
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Core listTasks function failed: ${error.message}`);
|
||||
return { success: false, error: { code: 'LIST_TASKS_CORE_ERROR', message: error.message || 'Failed to list tasks' } };
|
||||
}
|
||||
};
|
||||
|
||||
// Use the caching utility
|
||||
try {
|
||||
const result = await getCachedOrExecute({
|
||||
cacheKey,
|
||||
actionFn: coreListTasksAction,
|
||||
log
|
||||
});
|
||||
log.info(`listTasksDirect completed. From cache: ${result.fromCache}`);
|
||||
return result; // Returns { success, data/error, fromCache }
|
||||
} catch(error) {
|
||||
// Catch unexpected errors from getCachedOrExecute itself (though unlikely)
|
||||
log.error(`Unexpected error during getCachedOrExecute for listTasks: ${error.message}`);
|
||||
console.error(error.stack);
|
||||
return { success: false, error: { code: 'CACHE_UTIL_ERROR', message: error.message }, fromCache: false };
|
||||
}
|
||||
}
|
||||
122
mcp-server/src/core/direct-functions/next-task.js
Normal file
122
mcp-server/src/core/direct-functions/next-task.js
Normal file
@@ -0,0 +1,122 @@
|
||||
/**
|
||||
* next-task.js
|
||||
* Direct function implementation for finding the next task to work on
|
||||
*/
|
||||
|
||||
import { findNextTask } from '../../../../scripts/modules/task-manager.js';
|
||||
import { readJSON } from '../../../../scripts/modules/utils.js';
|
||||
import { getCachedOrExecute } from '../../tools/utils.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for finding the next task to work on with error handling and caching.
|
||||
*
|
||||
* @param {Object} args - Command arguments
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<Object>} - Next task result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
|
||||
*/
|
||||
export async function nextTaskDirect(args, log) {
|
||||
let tasksPath;
|
||||
try {
|
||||
// Find the tasks path first - needed for cache key and execution
|
||||
tasksPath = findTasksJsonPath(args, log);
|
||||
} catch (error) {
|
||||
log.error(`Tasks file not found: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'FILE_NOT_FOUND_ERROR',
|
||||
message: error.message
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Generate cache key using task path
|
||||
const cacheKey = `nextTask:${tasksPath}`;
|
||||
|
||||
// Define the action function to be executed on cache miss
|
||||
const coreNextTaskAction = async () => {
|
||||
try {
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
log.info(`Finding next task from ${tasksPath}`);
|
||||
|
||||
// Read tasks data
|
||||
const data = readJSON(tasksPath);
|
||||
if (!data || !data.tasks) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INVALID_TASKS_FILE',
|
||||
message: `No valid tasks found in ${tasksPath}`
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Find the next task
|
||||
const nextTask = findNextTask(data.tasks);
|
||||
|
||||
if (!nextTask) {
|
||||
log.info('No eligible next task found. All tasks are either completed or have unsatisfied dependencies');
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: 'No eligible next task found. All tasks are either completed or have unsatisfied dependencies',
|
||||
nextTask: null,
|
||||
allTasks: data.tasks
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
// Return the next task data with the full tasks array for reference
|
||||
log.info(`Successfully found next task ${nextTask.id}: ${nextTask.title}`);
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
nextTask,
|
||||
allTasks: data.tasks
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error finding next task: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CORE_FUNCTION_ERROR',
|
||||
message: error.message || 'Failed to find next task'
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
// Use the caching utility
|
||||
try {
|
||||
const result = await getCachedOrExecute({
|
||||
cacheKey,
|
||||
actionFn: coreNextTaskAction,
|
||||
log
|
||||
});
|
||||
log.info(`nextTaskDirect completed. From cache: ${result.fromCache}`);
|
||||
return result; // Returns { success, data/error, fromCache }
|
||||
} catch (error) {
|
||||
// Catch unexpected errors from getCachedOrExecute itself
|
||||
log.error(`Unexpected error during getCachedOrExecute for nextTask: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'UNEXPECTED_ERROR',
|
||||
message: error.message
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
}
|
||||
114
mcp-server/src/core/direct-functions/parse-prd.js
Normal file
114
mcp-server/src/core/direct-functions/parse-prd.js
Normal file
@@ -0,0 +1,114 @@
|
||||
/**
|
||||
* parse-prd.js
|
||||
* Direct function implementation for parsing PRD documents
|
||||
*/
|
||||
|
||||
import path from 'path';
|
||||
import fs from 'fs';
|
||||
import { parsePRD } from '../../../../scripts/modules/task-manager.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for parsing PRD documents and generating tasks.
|
||||
*
|
||||
* @param {Object} args - Command arguments containing input, numTasks or tasks, and output options.
|
||||
* @param {Object} log - Logger object.
|
||||
* @returns {Promise<Object>} - Result object with success status and data/error information.
|
||||
*/
|
||||
export async function parsePRDDirect(args, log) {
|
||||
try {
|
||||
log.info(`Parsing PRD document with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Check required parameters
|
||||
if (!args.input) {
|
||||
const errorMessage = 'No input file specified. Please provide an input PRD document path.';
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'MISSING_INPUT_FILE', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Resolve input path (relative to project root if provided)
|
||||
const projectRoot = args.projectRoot || process.cwd();
|
||||
const inputPath = path.isAbsolute(args.input) ? args.input : path.resolve(projectRoot, args.input);
|
||||
|
||||
// Determine output path
|
||||
let outputPath;
|
||||
if (args.output) {
|
||||
outputPath = path.isAbsolute(args.output) ? args.output : path.resolve(projectRoot, args.output);
|
||||
} else {
|
||||
// Default to tasks/tasks.json in the project root
|
||||
outputPath = path.resolve(projectRoot, 'tasks', 'tasks.json');
|
||||
}
|
||||
|
||||
// Verify input file exists
|
||||
if (!fs.existsSync(inputPath)) {
|
||||
const errorMessage = `Input file not found: ${inputPath}`;
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'INPUT_FILE_NOT_FOUND', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Parse number of tasks - handle both string and number values
|
||||
let numTasks = 10; // Default
|
||||
if (args.numTasks) {
|
||||
numTasks = typeof args.numTasks === 'string' ? parseInt(args.numTasks, 10) : args.numTasks;
|
||||
if (isNaN(numTasks)) {
|
||||
numTasks = 10; // Fallback to default if parsing fails
|
||||
log.warn(`Invalid numTasks value: ${args.numTasks}. Using default: 10`);
|
||||
}
|
||||
}
|
||||
|
||||
log.info(`Preparing to parse PRD from ${inputPath} and output to ${outputPath} with ${numTasks} tasks`);
|
||||
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Execute core parsePRD function (which is not async but we'll await it to maintain consistency)
|
||||
await parsePRD(inputPath, outputPath, numTasks);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
// Since parsePRD doesn't return a value but writes to a file, we'll read the result
|
||||
// to return it to the caller
|
||||
if (fs.existsSync(outputPath)) {
|
||||
const tasksData = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
|
||||
log.info(`Successfully parsed PRD and generated ${tasksData.tasks?.length || 0} tasks`);
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Successfully generated ${tasksData.tasks?.length || 0} tasks from PRD`,
|
||||
taskCount: tasksData.tasks?.length || 0,
|
||||
outputPath
|
||||
},
|
||||
fromCache: false // This operation always modifies state and should never be cached
|
||||
};
|
||||
} else {
|
||||
const errorMessage = `Tasks file was not created at ${outputPath}`;
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'OUTPUT_FILE_NOT_CREATED', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error parsing PRD: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'PARSE_PRD_ERROR', message: error.message || 'Unknown error parsing PRD' },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
}
|
||||
83
mcp-server/src/core/direct-functions/remove-dependency.js
Normal file
83
mcp-server/src/core/direct-functions/remove-dependency.js
Normal file
@@ -0,0 +1,83 @@
|
||||
/**
|
||||
* Direct function wrapper for removeDependency
|
||||
*/
|
||||
|
||||
import { removeDependency } from '../../../../scripts/modules/dependency-manager.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
|
||||
/**
|
||||
* Remove a dependency from a task
|
||||
* @param {Object} args - Function arguments
|
||||
* @param {string|number} args.id - Task ID to remove dependency from
|
||||
* @param {string|number} args.dependsOn - Task ID to remove as a dependency
|
||||
* @param {string} [args.file] - Path to the tasks file
|
||||
* @param {string} [args.projectRoot] - Project root directory
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
|
||||
*/
|
||||
export async function removeDependencyDirect(args, log) {
|
||||
try {
|
||||
log.info(`Removing dependency with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Validate required parameters
|
||||
if (!args.id) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INPUT_VALIDATION_ERROR',
|
||||
message: 'Task ID (id) is required'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
if (!args.dependsOn) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INPUT_VALIDATION_ERROR',
|
||||
message: 'Dependency ID (dependsOn) is required'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Find the tasks.json path
|
||||
const tasksPath = findTasksJsonPath(args, log);
|
||||
|
||||
// Format IDs for the core function
|
||||
const taskId = args.id.includes && args.id.includes('.') ? args.id : parseInt(args.id, 10);
|
||||
const dependencyId = args.dependsOn.includes && args.dependsOn.includes('.') ? args.dependsOn : parseInt(args.dependsOn, 10);
|
||||
|
||||
log.info(`Removing dependency: task ${taskId} no longer depends on ${dependencyId}`);
|
||||
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Call the core function
|
||||
await removeDependency(tasksPath, taskId, dependencyId);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Successfully removed dependency: Task ${taskId} no longer depends on ${dependencyId}`,
|
||||
taskId: taskId,
|
||||
dependencyId: dependencyId
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error in removeDependencyDirect: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CORE_FUNCTION_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
95
mcp-server/src/core/direct-functions/remove-subtask.js
Normal file
95
mcp-server/src/core/direct-functions/remove-subtask.js
Normal file
@@ -0,0 +1,95 @@
|
||||
/**
|
||||
* Direct function wrapper for removeSubtask
|
||||
*/
|
||||
|
||||
import { removeSubtask } from '../../../../scripts/modules/task-manager.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
|
||||
/**
|
||||
* Remove a subtask from its parent task
|
||||
* @param {Object} args - Function arguments
|
||||
* @param {string} args.id - Subtask ID in format "parentId.subtaskId" (required)
|
||||
* @param {boolean} [args.convert] - Whether to convert the subtask to a standalone task
|
||||
* @param {string} [args.file] - Path to the tasks file
|
||||
* @param {boolean} [args.skipGenerate] - Skip regenerating task files
|
||||
* @param {string} [args.projectRoot] - Project root directory
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
|
||||
*/
|
||||
export async function removeSubtaskDirect(args, log) {
|
||||
try {
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
log.info(`Removing subtask with args: ${JSON.stringify(args)}`);
|
||||
|
||||
if (!args.id) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INPUT_VALIDATION_ERROR',
|
||||
message: 'Subtask ID is required and must be in format "parentId.subtaskId"'
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Validate subtask ID format
|
||||
if (!args.id.includes('.')) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INPUT_VALIDATION_ERROR',
|
||||
message: `Invalid subtask ID format: ${args.id}. Expected format: "parentId.subtaskId"`
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Find the tasks.json path
|
||||
const tasksPath = findTasksJsonPath(args, log);
|
||||
|
||||
// Convert convertToTask to a boolean
|
||||
const convertToTask = args.convert === true;
|
||||
|
||||
// Determine if we should generate files
|
||||
const generateFiles = !args.skipGenerate;
|
||||
|
||||
log.info(`Removing subtask ${args.id} (convertToTask: ${convertToTask}, generateFiles: ${generateFiles})`);
|
||||
|
||||
const result = await removeSubtask(tasksPath, args.id, convertToTask, generateFiles);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
if (convertToTask && result) {
|
||||
// Return info about the converted task
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Subtask ${args.id} successfully converted to task #${result.id}`,
|
||||
task: result
|
||||
}
|
||||
};
|
||||
} else {
|
||||
// Return simple success message for deletion
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Subtask ${args.id} successfully removed`
|
||||
}
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
// Ensure silent mode is disabled even if an outer error occurs
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error in removeSubtaskDirect: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CORE_FUNCTION_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
104
mcp-server/src/core/direct-functions/remove-task.js
Normal file
104
mcp-server/src/core/direct-functions/remove-task.js
Normal file
@@ -0,0 +1,104 @@
|
||||
/**
|
||||
* remove-task.js
|
||||
* Direct function implementation for removing a task
|
||||
*/
|
||||
|
||||
import { removeTask } from '../../../../scripts/modules/task-manager.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for removeTask with error handling.
|
||||
*
|
||||
* @param {Object} args - Command arguments
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<Object>} - Remove task result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: false }
|
||||
*/
|
||||
export async function removeTaskDirect(args, log) {
|
||||
try {
|
||||
// Find the tasks path first
|
||||
let tasksPath;
|
||||
try {
|
||||
tasksPath = findTasksJsonPath(args, log);
|
||||
} catch (error) {
|
||||
log.error(`Tasks file not found: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'FILE_NOT_FOUND_ERROR',
|
||||
message: error.message
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Validate task ID parameter
|
||||
const taskId = args.id;
|
||||
if (!taskId) {
|
||||
log.error('Task ID is required');
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INPUT_VALIDATION_ERROR',
|
||||
message: 'Task ID is required'
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Skip confirmation in the direct function since it's handled by the client
|
||||
log.info(`Removing task with ID: ${taskId} from ${tasksPath}`);
|
||||
|
||||
try {
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Call the core removeTask function
|
||||
const result = await removeTask(tasksPath, taskId);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
log.info(`Successfully removed task: ${taskId}`);
|
||||
|
||||
// Return the result
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: result.message,
|
||||
taskId: taskId,
|
||||
tasksPath: tasksPath,
|
||||
removedTask: result.removedTask
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error removing task: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: error.code || 'REMOVE_TASK_ERROR',
|
||||
message: error.message || 'Failed to remove task'
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
// Ensure silent mode is disabled even if an outer error occurs
|
||||
disableSilentMode();
|
||||
|
||||
// Catch any unexpected errors
|
||||
log.error(`Unexpected error in removeTaskDirect: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'UNEXPECTED_ERROR',
|
||||
message: error.message
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
}
|
||||
109
mcp-server/src/core/direct-functions/set-task-status.js
Normal file
109
mcp-server/src/core/direct-functions/set-task-status.js
Normal file
@@ -0,0 +1,109 @@
|
||||
/**
|
||||
* set-task-status.js
|
||||
* Direct function implementation for setting task status
|
||||
*/
|
||||
|
||||
import { setTaskStatus } from '../../../../scripts/modules/task-manager.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for setTaskStatus with error handling.
|
||||
*
|
||||
* @param {Object} args - Command arguments containing id, status and file path options.
|
||||
* @param {Object} log - Logger object.
|
||||
* @returns {Promise<Object>} - Result object with success status and data/error information.
|
||||
*/
|
||||
export async function setTaskStatusDirect(args, log) {
|
||||
try {
|
||||
log.info(`Setting task status with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Check required parameters
|
||||
if (!args.id) {
|
||||
const errorMessage = 'No task ID specified. Please provide a task ID to update.';
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'MISSING_TASK_ID', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
if (!args.status) {
|
||||
const errorMessage = 'No status specified. Please provide a new status value.';
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'MISSING_STATUS', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Get tasks file path
|
||||
let tasksPath;
|
||||
try {
|
||||
// The enhanced findTasksJsonPath will now search in parent directories if needed
|
||||
tasksPath = findTasksJsonPath(args, log);
|
||||
log.info(`Found tasks file at: ${tasksPath}`);
|
||||
} catch (error) {
|
||||
log.error(`Error finding tasks file: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'TASKS_FILE_ERROR',
|
||||
message: `${error.message}\n\nPlease ensure you are in a Task Master project directory or use the --project-root parameter to specify the path to your project.`
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Execute core setTaskStatus function
|
||||
// We need to handle the arguments correctly - this function expects tasksPath, taskIdInput, newStatus
|
||||
const taskId = args.id;
|
||||
const newStatus = args.status;
|
||||
|
||||
log.info(`Setting task ${taskId} status to "${newStatus}"`);
|
||||
|
||||
// Call the core function
|
||||
try {
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
await setTaskStatus(tasksPath, taskId, newStatus);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
log.info(`Successfully set task ${taskId} status to ${newStatus}`);
|
||||
|
||||
// Return success data
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Successfully updated task ${taskId} status to "${newStatus}"`,
|
||||
taskId,
|
||||
status: newStatus,
|
||||
tasksPath
|
||||
},
|
||||
fromCache: false // This operation always modifies state and should never be cached
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error setting task status: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'SET_STATUS_ERROR', message: error.message || 'Unknown error setting task status' },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
} catch (error) {
|
||||
log.error(`Error setting task status: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'SET_STATUS_ERROR', message: error.message || 'Unknown error setting task status' },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
}
|
||||
136
mcp-server/src/core/direct-functions/show-task.js
Normal file
136
mcp-server/src/core/direct-functions/show-task.js
Normal file
@@ -0,0 +1,136 @@
|
||||
/**
|
||||
* show-task.js
|
||||
* Direct function implementation for showing task details
|
||||
*/
|
||||
|
||||
import { findTaskById } from '../../../../scripts/modules/utils.js';
|
||||
import { readJSON } from '../../../../scripts/modules/utils.js';
|
||||
import { getCachedOrExecute } from '../../tools/utils.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for showing task details with error handling and caching.
|
||||
*
|
||||
* @param {Object} args - Command arguments
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<Object>} - Task details result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
|
||||
*/
|
||||
export async function showTaskDirect(args, log) {
|
||||
let tasksPath;
|
||||
try {
|
||||
// Find the tasks path first - needed for cache key and execution
|
||||
tasksPath = findTasksJsonPath(args, log);
|
||||
} catch (error) {
|
||||
log.error(`Tasks file not found: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'FILE_NOT_FOUND_ERROR',
|
||||
message: error.message
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Validate task ID
|
||||
const taskId = args.id;
|
||||
if (!taskId) {
|
||||
log.error('Task ID is required');
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INPUT_VALIDATION_ERROR',
|
||||
message: 'Task ID is required'
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Generate cache key using task path and ID
|
||||
const cacheKey = `showTask:${tasksPath}:${taskId}`;
|
||||
|
||||
// Define the action function to be executed on cache miss
|
||||
const coreShowTaskAction = async () => {
|
||||
try {
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
log.info(`Retrieving task details for ID: ${taskId} from ${tasksPath}`);
|
||||
|
||||
// Read tasks data
|
||||
const data = readJSON(tasksPath);
|
||||
if (!data || !data.tasks) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'INVALID_TASKS_FILE',
|
||||
message: `No valid tasks found in ${tasksPath}`
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Find the specific task
|
||||
const task = findTaskById(data.tasks, taskId);
|
||||
|
||||
if (!task) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'TASK_NOT_FOUND',
|
||||
message: `Task with ID ${taskId} not found`
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
// Return the task data with the full tasks array for reference
|
||||
// (needed for formatDependenciesWithStatus function in UI)
|
||||
log.info(`Successfully found task ${taskId}`);
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
task,
|
||||
allTasks: data.tasks
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error showing task: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CORE_FUNCTION_ERROR',
|
||||
message: error.message || 'Failed to show task details'
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
// Use the caching utility
|
||||
try {
|
||||
const result = await getCachedOrExecute({
|
||||
cacheKey,
|
||||
actionFn: coreShowTaskAction,
|
||||
log
|
||||
});
|
||||
log.info(`showTaskDirect completed. From cache: ${result.fromCache}`);
|
||||
return result; // Returns { success, data/error, fromCache }
|
||||
} catch (error) {
|
||||
// Catch unexpected errors from getCachedOrExecute itself
|
||||
disableSilentMode();
|
||||
log.error(`Unexpected error during getCachedOrExecute for showTask: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'UNEXPECTED_ERROR',
|
||||
message: error.message
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
}
|
||||
123
mcp-server/src/core/direct-functions/update-subtask-by-id.js
Normal file
123
mcp-server/src/core/direct-functions/update-subtask-by-id.js
Normal file
@@ -0,0 +1,123 @@
|
||||
/**
|
||||
* update-subtask-by-id.js
|
||||
* Direct function implementation for appending information to a specific subtask
|
||||
*/
|
||||
|
||||
import { updateSubtaskById } from '../../../../scripts/modules/task-manager.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for updateSubtaskById with error handling.
|
||||
*
|
||||
* @param {Object} args - Command arguments containing id, prompt, useResearch and file path options.
|
||||
* @param {Object} log - Logger object.
|
||||
* @returns {Promise<Object>} - Result object with success status and data/error information.
|
||||
*/
|
||||
export async function updateSubtaskByIdDirect(args, log) {
|
||||
try {
|
||||
log.info(`Updating subtask with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Check required parameters
|
||||
if (!args.id) {
|
||||
const errorMessage = 'No subtask ID specified. Please provide a subtask ID to update.';
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'MISSING_SUBTASK_ID', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
if (!args.prompt) {
|
||||
const errorMessage = 'No prompt specified. Please provide a prompt with information to add to the subtask.';
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'MISSING_PROMPT', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Validate subtask ID format
|
||||
const subtaskId = args.id;
|
||||
if (typeof subtaskId !== 'string' || !subtaskId.includes('.')) {
|
||||
const errorMessage = `Invalid subtask ID format: ${subtaskId}. Subtask ID must be in format "parentId.subtaskId" (e.g., "5.2").`;
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'INVALID_SUBTASK_ID_FORMAT', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Get tasks file path
|
||||
let tasksPath;
|
||||
try {
|
||||
tasksPath = findTasksJsonPath(args, log);
|
||||
} catch (error) {
|
||||
log.error(`Error finding tasks file: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'TASKS_FILE_ERROR', message: error.message },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Get research flag
|
||||
const useResearch = args.research === true;
|
||||
|
||||
log.info(`Updating subtask with ID ${subtaskId} with prompt "${args.prompt}" and research: ${useResearch}`);
|
||||
|
||||
try {
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Execute core updateSubtaskById function
|
||||
const updatedSubtask = await updateSubtaskById(tasksPath, subtaskId, args.prompt, useResearch);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
// Handle the case where the subtask couldn't be updated (e.g., already marked as done)
|
||||
if (!updatedSubtask) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'SUBTASK_UPDATE_FAILED',
|
||||
message: 'Failed to update subtask. It may be marked as completed, or another error occurred.'
|
||||
},
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Return the updated subtask information
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Successfully updated subtask with ID ${subtaskId}`,
|
||||
subtaskId,
|
||||
parentId: subtaskId.split('.')[0],
|
||||
subtask: updatedSubtask,
|
||||
tasksPath,
|
||||
useResearch
|
||||
},
|
||||
fromCache: false // This operation always modifies state and should never be cached
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
throw error; // Rethrow to be caught by outer catch block
|
||||
}
|
||||
} catch (error) {
|
||||
// Ensure silent mode is disabled
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error updating subtask by ID: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'UPDATE_SUBTASK_ERROR', message: error.message || 'Unknown error updating subtask' },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
}
|
||||
115
mcp-server/src/core/direct-functions/update-task-by-id.js
Normal file
115
mcp-server/src/core/direct-functions/update-task-by-id.js
Normal file
@@ -0,0 +1,115 @@
|
||||
/**
|
||||
* update-task-by-id.js
|
||||
* Direct function implementation for updating a single task by ID with new information
|
||||
*/
|
||||
|
||||
import { updateTaskById } from '../../../../scripts/modules/task-manager.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for updateTaskById with error handling.
|
||||
*
|
||||
* @param {Object} args - Command arguments containing id, prompt, useResearch and file path options.
|
||||
* @param {Object} log - Logger object.
|
||||
* @returns {Promise<Object>} - Result object with success status and data/error information.
|
||||
*/
|
||||
export async function updateTaskByIdDirect(args, log) {
|
||||
try {
|
||||
log.info(`Updating task with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Check required parameters
|
||||
if (!args.id) {
|
||||
const errorMessage = 'No task ID specified. Please provide a task ID to update.';
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'MISSING_TASK_ID', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
if (!args.prompt) {
|
||||
const errorMessage = 'No prompt specified. Please provide a prompt with new information for the task update.';
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'MISSING_PROMPT', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Parse taskId - handle both string and number values
|
||||
let taskId;
|
||||
if (typeof args.id === 'string') {
|
||||
// Handle subtask IDs (e.g., "5.2")
|
||||
if (args.id.includes('.')) {
|
||||
taskId = args.id; // Keep as string for subtask IDs
|
||||
} else {
|
||||
// Parse as integer for main task IDs
|
||||
taskId = parseInt(args.id, 10);
|
||||
if (isNaN(taskId)) {
|
||||
const errorMessage = `Invalid task ID: ${args.id}. Task ID must be a positive integer or subtask ID (e.g., "5.2").`;
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'INVALID_TASK_ID', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
}
|
||||
} else {
|
||||
taskId = args.id;
|
||||
}
|
||||
|
||||
// Get tasks file path
|
||||
let tasksPath;
|
||||
try {
|
||||
tasksPath = findTasksJsonPath(args, log);
|
||||
} catch (error) {
|
||||
log.error(`Error finding tasks file: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'TASKS_FILE_ERROR', message: error.message },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Get research flag
|
||||
const useResearch = args.research === true;
|
||||
|
||||
log.info(`Updating task with ID ${taskId} with prompt "${args.prompt}" and research: ${useResearch}`);
|
||||
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Execute core updateTaskById function
|
||||
await updateTaskById(tasksPath, taskId, args.prompt, useResearch);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
// Since updateTaskById doesn't return a value but modifies the tasks file,
|
||||
// we'll return a success message
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Successfully updated task with ID ${taskId} based on the prompt`,
|
||||
taskId,
|
||||
tasksPath,
|
||||
useResearch
|
||||
},
|
||||
fromCache: false // This operation always modifies state and should never be cached
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error updating task by ID: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'UPDATE_TASK_ERROR', message: error.message || 'Unknown error updating task' },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
}
|
||||
115
mcp-server/src/core/direct-functions/update-tasks.js
Normal file
115
mcp-server/src/core/direct-functions/update-tasks.js
Normal file
@@ -0,0 +1,115 @@
|
||||
/**
|
||||
* update-tasks.js
|
||||
* Direct function implementation for updating tasks based on new context/prompt
|
||||
*/
|
||||
|
||||
import { updateTasks } from '../../../../scripts/modules/task-manager.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
|
||||
/**
|
||||
* Direct function wrapper for updating tasks based on new context/prompt.
|
||||
*
|
||||
* @param {Object} args - Command arguments containing fromId, prompt, useResearch and file path options.
|
||||
* @param {Object} log - Logger object.
|
||||
* @returns {Promise<Object>} - Result object with success status and data/error information.
|
||||
*/
|
||||
export async function updateTasksDirect(args, log) {
|
||||
try {
|
||||
log.info(`Updating tasks with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Check required parameters
|
||||
if (!args.from) {
|
||||
const errorMessage = 'No from ID specified. Please provide a task ID to start updating from.';
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'MISSING_FROM_ID', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
if (!args.prompt) {
|
||||
const errorMessage = 'No prompt specified. Please provide a prompt with new context for task updates.';
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'MISSING_PROMPT', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Parse fromId - handle both string and number values
|
||||
let fromId;
|
||||
if (typeof args.from === 'string') {
|
||||
fromId = parseInt(args.from, 10);
|
||||
if (isNaN(fromId)) {
|
||||
const errorMessage = `Invalid from ID: ${args.from}. Task ID must be a positive integer.`;
|
||||
log.error(errorMessage);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'INVALID_FROM_ID', message: errorMessage },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
} else {
|
||||
fromId = args.from;
|
||||
}
|
||||
|
||||
// Get tasks file path
|
||||
let tasksPath;
|
||||
try {
|
||||
tasksPath = findTasksJsonPath(args, log);
|
||||
} catch (error) {
|
||||
log.error(`Error finding tasks file: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'TASKS_FILE_ERROR', message: error.message },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
|
||||
// Get research flag
|
||||
const useResearch = args.research === true;
|
||||
|
||||
log.info(`Updating tasks from ID ${fromId} with prompt "${args.prompt}" and research: ${useResearch}`);
|
||||
|
||||
try {
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Execute core updateTasks function
|
||||
await updateTasks(tasksPath, fromId, args.prompt, useResearch);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
// Since updateTasks doesn't return a value but modifies the tasks file,
|
||||
// we'll return a success message
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: `Successfully updated tasks from ID ${fromId} based on the prompt`,
|
||||
fromId,
|
||||
tasksPath,
|
||||
useResearch
|
||||
},
|
||||
fromCache: false // This operation always modifies state and should never be cached
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
throw error; // Rethrow to be caught by outer catch block
|
||||
}
|
||||
} catch (error) {
|
||||
// Ensure silent mode is disabled
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error updating tasks: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: { code: 'UPDATE_TASKS_ERROR', message: error.message || 'Unknown error updating tasks' },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,65 @@
|
||||
/**
|
||||
* Direct function wrapper for validateDependenciesCommand
|
||||
*/
|
||||
|
||||
import { validateDependenciesCommand } from '../../../../scripts/modules/dependency-manager.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||
import fs from 'fs';
|
||||
|
||||
/**
|
||||
* Validate dependencies in tasks.json
|
||||
* @param {Object} args - Function arguments
|
||||
* @param {string} [args.file] - Path to the tasks file
|
||||
* @param {string} [args.projectRoot] - Project root directory
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
|
||||
*/
|
||||
export async function validateDependenciesDirect(args, log) {
|
||||
try {
|
||||
log.info(`Validating dependencies in tasks...`);
|
||||
|
||||
// Find the tasks.json path
|
||||
const tasksPath = findTasksJsonPath(args, log);
|
||||
|
||||
// Verify the file exists
|
||||
if (!fs.existsSync(tasksPath)) {
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'FILE_NOT_FOUND',
|
||||
message: `Tasks file not found at ${tasksPath}`
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Enable silent mode to prevent console logs from interfering with JSON response
|
||||
enableSilentMode();
|
||||
|
||||
// Call the original command function
|
||||
await validateDependenciesCommand(tasksPath);
|
||||
|
||||
// Restore normal logging
|
||||
disableSilentMode();
|
||||
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
message: 'Dependencies validated successfully',
|
||||
tasksPath
|
||||
}
|
||||
};
|
||||
} catch (error) {
|
||||
// Make sure to restore normal logging even if there's an error
|
||||
disableSilentMode();
|
||||
|
||||
log.error(`Error validating dependencies: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'VALIDATION_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -1,167 +1,87 @@
|
||||
/**
|
||||
* task-master-core.js
|
||||
* Direct function imports from Task Master modules
|
||||
*
|
||||
* This module provides direct access to Task Master core functions
|
||||
* for improved performance and error handling compared to CLI execution.
|
||||
* Central module that imports and re-exports all direct function implementations
|
||||
* for improved organization and maintainability.
|
||||
*/
|
||||
|
||||
import path from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
import { dirname } from 'path';
|
||||
import fs from 'fs';
|
||||
// Import direct function implementations
|
||||
import { listTasksDirect } from './direct-functions/list-tasks.js';
|
||||
import { getCacheStatsDirect } from './direct-functions/cache-stats.js';
|
||||
import { parsePRDDirect } from './direct-functions/parse-prd.js';
|
||||
import { updateTasksDirect } from './direct-functions/update-tasks.js';
|
||||
import { updateTaskByIdDirect } from './direct-functions/update-task-by-id.js';
|
||||
import { updateSubtaskByIdDirect } from './direct-functions/update-subtask-by-id.js';
|
||||
import { generateTaskFilesDirect } from './direct-functions/generate-task-files.js';
|
||||
import { setTaskStatusDirect } from './direct-functions/set-task-status.js';
|
||||
import { showTaskDirect } from './direct-functions/show-task.js';
|
||||
import { nextTaskDirect } from './direct-functions/next-task.js';
|
||||
import { expandTaskDirect } from './direct-functions/expand-task.js';
|
||||
import { addTaskDirect } from './direct-functions/add-task.js';
|
||||
import { addSubtaskDirect } from './direct-functions/add-subtask.js';
|
||||
import { removeSubtaskDirect } from './direct-functions/remove-subtask.js';
|
||||
import { analyzeTaskComplexityDirect } from './direct-functions/analyze-task-complexity.js';
|
||||
import { clearSubtasksDirect } from './direct-functions/clear-subtasks.js';
|
||||
import { expandAllTasksDirect } from './direct-functions/expand-all-tasks.js';
|
||||
import { removeDependencyDirect } from './direct-functions/remove-dependency.js';
|
||||
import { validateDependenciesDirect } from './direct-functions/validate-dependencies.js';
|
||||
import { fixDependenciesDirect } from './direct-functions/fix-dependencies.js';
|
||||
import { complexityReportDirect } from './direct-functions/complexity-report.js';
|
||||
import { addDependencyDirect } from './direct-functions/add-dependency.js';
|
||||
import { removeTaskDirect } from './direct-functions/remove-task.js';
|
||||
|
||||
// Get the current module's directory
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = dirname(__filename);
|
||||
// Re-export utility functions
|
||||
export { findTasksJsonPath } from './utils/path-utils.js';
|
||||
|
||||
// Import Task Master modules
|
||||
import {
|
||||
listTasks,
|
||||
// We'll import more functions as we continue implementation
|
||||
} from '../../../scripts/modules/task-manager.js';
|
||||
// Use Map for potential future enhancements like introspection or dynamic dispatch
|
||||
export const directFunctions = new Map([
|
||||
['listTasksDirect', listTasksDirect],
|
||||
['getCacheStatsDirect', getCacheStatsDirect],
|
||||
['parsePRDDirect', parsePRDDirect],
|
||||
['updateTasksDirect', updateTasksDirect],
|
||||
['updateTaskByIdDirect', updateTaskByIdDirect],
|
||||
['updateSubtaskByIdDirect', updateSubtaskByIdDirect],
|
||||
['generateTaskFilesDirect', generateTaskFilesDirect],
|
||||
['setTaskStatusDirect', setTaskStatusDirect],
|
||||
['showTaskDirect', showTaskDirect],
|
||||
['nextTaskDirect', nextTaskDirect],
|
||||
['expandTaskDirect', expandTaskDirect],
|
||||
['addTaskDirect', addTaskDirect],
|
||||
['addSubtaskDirect', addSubtaskDirect],
|
||||
['removeSubtaskDirect', removeSubtaskDirect],
|
||||
['analyzeTaskComplexityDirect', analyzeTaskComplexityDirect],
|
||||
['clearSubtasksDirect', clearSubtasksDirect],
|
||||
['expandAllTasksDirect', expandAllTasksDirect],
|
||||
['removeDependencyDirect', removeDependencyDirect],
|
||||
['validateDependenciesDirect', validateDependenciesDirect],
|
||||
['fixDependenciesDirect', fixDependenciesDirect],
|
||||
['complexityReportDirect', complexityReportDirect],
|
||||
['addDependencyDirect', addDependencyDirect],
|
||||
['removeTaskDirect', removeTaskDirect]
|
||||
]);
|
||||
|
||||
// Import context manager
|
||||
import { contextManager } from './context-manager.js';
|
||||
import { getCachedOrExecute } from '../tools/utils.js'; // Import the utility here
|
||||
|
||||
/**
|
||||
* Finds the absolute path to the tasks.json file based on project root and arguments.
|
||||
* @param {Object} args - Command arguments, potentially including 'projectRoot' and 'file'.
|
||||
* @param {Object} log - Logger object.
|
||||
* @returns {string} - Absolute path to the tasks.json file.
|
||||
* @throws {Error} - If tasks.json cannot be found.
|
||||
*/
|
||||
function findTasksJsonPath(args, log) {
|
||||
// Assume projectRoot is already normalized absolute path if passed in args
|
||||
// Or use getProjectRoot if we decide to centralize that logic
|
||||
const projectRoot = args.projectRoot || process.cwd();
|
||||
log.info(`Searching for tasks.json within project root: ${projectRoot}`);
|
||||
|
||||
const possiblePaths = [];
|
||||
|
||||
// 1. If a file is explicitly provided relative to projectRoot
|
||||
if (args.file) {
|
||||
possiblePaths.push(path.resolve(projectRoot, args.file));
|
||||
}
|
||||
|
||||
// 2. Check the standard locations relative to projectRoot
|
||||
possiblePaths.push(
|
||||
path.join(projectRoot, 'tasks.json'),
|
||||
path.join(projectRoot, 'tasks', 'tasks.json')
|
||||
);
|
||||
|
||||
log.info(`Checking potential task file paths: ${possiblePaths.join(', ')}`);
|
||||
|
||||
// Find the first existing path
|
||||
for (const p of possiblePaths) {
|
||||
if (fs.existsSync(p)) {
|
||||
log.info(`Found tasks file at: ${p}`);
|
||||
return p;
|
||||
}
|
||||
}
|
||||
|
||||
// If no file was found, throw an error
|
||||
const error = new Error(`Tasks file not found in any of the expected locations relative to ${projectRoot}: ${possiblePaths.join(', ')}`);
|
||||
error.code = 'TASKS_FILE_NOT_FOUND';
|
||||
throw error;
|
||||
}
|
||||
|
||||
/**
|
||||
* Direct function wrapper for listTasks with error handling and caching.
|
||||
*
|
||||
* @param {Object} args - Command arguments (projectRoot is expected to be resolved).
|
||||
* @param {Object} log - Logger object.
|
||||
* @returns {Promise<Object>} - Task list result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }.
|
||||
*/
|
||||
export async function listTasksDirect(args, log) {
|
||||
let tasksPath;
|
||||
try {
|
||||
// Find the tasks path first - needed for cache key and execution
|
||||
tasksPath = findTasksJsonPath(args, log);
|
||||
} catch (error) {
|
||||
if (error.code === 'TASKS_FILE_NOT_FOUND') {
|
||||
log.error(`Tasks file not found: ${error.message}`);
|
||||
// Return the error structure expected by the calling tool/handler
|
||||
return { success: false, error: { code: error.code, message: error.message }, fromCache: false };
|
||||
}
|
||||
log.error(`Unexpected error finding tasks file: ${error.message}`);
|
||||
// Re-throw for outer catch or return structured error
|
||||
return { success: false, error: { code: 'FIND_TASKS_PATH_ERROR', message: error.message }, fromCache: false };
|
||||
}
|
||||
|
||||
// Generate cache key *after* finding tasksPath
|
||||
const statusFilter = args.status || 'all';
|
||||
const withSubtasks = args.withSubtasks || false;
|
||||
const cacheKey = `listTasks:${tasksPath}:${statusFilter}:${withSubtasks}`;
|
||||
|
||||
// Define the action function to be executed on cache miss
|
||||
const coreListTasksAction = async () => {
|
||||
try {
|
||||
log.info(`Executing core listTasks function for path: ${tasksPath}, filter: ${statusFilter}, subtasks: ${withSubtasks}`);
|
||||
const resultData = listTasks(tasksPath, statusFilter, withSubtasks, 'json');
|
||||
|
||||
if (!resultData || !resultData.tasks) {
|
||||
log.error('Invalid or empty response from listTasks core function');
|
||||
return { success: false, error: { code: 'INVALID_CORE_RESPONSE', message: 'Invalid or empty response from listTasks core function' } };
|
||||
}
|
||||
log.info(`Core listTasks function retrieved ${resultData.tasks.length} tasks`);
|
||||
return { success: true, data: resultData };
|
||||
|
||||
} catch (error) {
|
||||
log.error(`Core listTasks function failed: ${error.message}`);
|
||||
return { success: false, error: { code: 'LIST_TASKS_CORE_ERROR', message: error.message || 'Failed to list tasks' } };
|
||||
}
|
||||
};
|
||||
|
||||
// Use the caching utility
|
||||
try {
|
||||
const result = await getCachedOrExecute({
|
||||
cacheKey,
|
||||
actionFn: coreListTasksAction,
|
||||
log
|
||||
});
|
||||
log.info(`listTasksDirect completed. From cache: ${result.fromCache}`);
|
||||
return result; // Returns { success, data/error, fromCache }
|
||||
} catch(error) {
|
||||
// Catch unexpected errors from getCachedOrExecute itself (though unlikely)
|
||||
log.error(`Unexpected error during getCachedOrExecute for listTasks: ${error.message}`);
|
||||
console.error(error.stack);
|
||||
return { success: false, error: { code: 'CACHE_UTIL_ERROR', message: error.message }, fromCache: false };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get cache statistics for monitoring
|
||||
* @param {Object} args - Command arguments
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {Object} - Cache statistics
|
||||
*/
|
||||
export async function getCacheStatsDirect(args, log) {
|
||||
try {
|
||||
log.info('Retrieving cache statistics');
|
||||
const stats = contextManager.getStats();
|
||||
return {
|
||||
success: true,
|
||||
data: stats
|
||||
};
|
||||
} catch (error) {
|
||||
log.error(`Error getting cache stats: ${error.message}`);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'CACHE_STATS_ERROR',
|
||||
message: error.message || 'Unknown error occurred'
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Maps Task Master functions to their direct implementation
|
||||
*/
|
||||
export const directFunctions = {
|
||||
list: listTasksDirect,
|
||||
cacheStats: getCacheStatsDirect,
|
||||
// Add more functions as we implement them
|
||||
// Re-export all direct function implementations
|
||||
export {
|
||||
listTasksDirect,
|
||||
getCacheStatsDirect,
|
||||
parsePRDDirect,
|
||||
updateTasksDirect,
|
||||
updateTaskByIdDirect,
|
||||
updateSubtaskByIdDirect,
|
||||
generateTaskFilesDirect,
|
||||
setTaskStatusDirect,
|
||||
showTaskDirect,
|
||||
nextTaskDirect,
|
||||
expandTaskDirect,
|
||||
addTaskDirect,
|
||||
addSubtaskDirect,
|
||||
removeSubtaskDirect,
|
||||
analyzeTaskComplexityDirect,
|
||||
clearSubtasksDirect,
|
||||
expandAllTasksDirect,
|
||||
removeDependencyDirect,
|
||||
validateDependenciesDirect,
|
||||
fixDependenciesDirect,
|
||||
complexityReportDirect,
|
||||
addDependencyDirect,
|
||||
removeTaskDirect
|
||||
};
|
||||
188
mcp-server/src/core/utils/ai-client-utils.js
Normal file
188
mcp-server/src/core/utils/ai-client-utils.js
Normal file
@@ -0,0 +1,188 @@
|
||||
/**
|
||||
* ai-client-utils.js
|
||||
* Utility functions for initializing AI clients in MCP context
|
||||
*/
|
||||
|
||||
import { Anthropic } from '@anthropic-ai/sdk';
|
||||
import dotenv from 'dotenv';
|
||||
|
||||
// Load environment variables for CLI mode
|
||||
dotenv.config();
|
||||
|
||||
// Default model configuration from CLI environment
|
||||
const DEFAULT_MODEL_CONFIG = {
|
||||
model: 'claude-3-7-sonnet-20250219',
|
||||
maxTokens: 64000,
|
||||
temperature: 0.2
|
||||
};
|
||||
|
||||
/**
|
||||
* Get an Anthropic client instance initialized with MCP session environment variables
|
||||
* @param {Object} [session] - Session object from MCP containing environment variables
|
||||
* @param {Object} [log] - Logger object to use (defaults to console)
|
||||
* @returns {Anthropic} Anthropic client instance
|
||||
* @throws {Error} If API key is missing
|
||||
*/
|
||||
export function getAnthropicClientForMCP(session, log = console) {
|
||||
try {
|
||||
// Extract API key from session.env or fall back to environment variables
|
||||
const apiKey = session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY;
|
||||
|
||||
if (!apiKey) {
|
||||
throw new Error('ANTHROPIC_API_KEY not found in session environment or process.env');
|
||||
}
|
||||
|
||||
// Initialize and return a new Anthropic client
|
||||
return new Anthropic({
|
||||
apiKey,
|
||||
defaultHeaders: {
|
||||
'anthropic-beta': 'output-128k-2025-02-19' // Include header for increased token limit
|
||||
}
|
||||
});
|
||||
} catch (error) {
|
||||
log.error(`Failed to initialize Anthropic client: ${error.message}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a Perplexity client instance initialized with MCP session environment variables
|
||||
* @param {Object} [session] - Session object from MCP containing environment variables
|
||||
* @param {Object} [log] - Logger object to use (defaults to console)
|
||||
* @returns {OpenAI} OpenAI client configured for Perplexity API
|
||||
* @throws {Error} If API key is missing or OpenAI package can't be imported
|
||||
*/
|
||||
export async function getPerplexityClientForMCP(session, log = console) {
|
||||
try {
|
||||
// Extract API key from session.env or fall back to environment variables
|
||||
const apiKey = session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY;
|
||||
|
||||
if (!apiKey) {
|
||||
throw new Error('PERPLEXITY_API_KEY not found in session environment or process.env');
|
||||
}
|
||||
|
||||
// Dynamically import OpenAI (it may not be used in all contexts)
|
||||
const { default: OpenAI } = await import('openai');
|
||||
|
||||
// Initialize and return a new OpenAI client configured for Perplexity
|
||||
return new OpenAI({
|
||||
apiKey,
|
||||
baseURL: 'https://api.perplexity.ai'
|
||||
});
|
||||
} catch (error) {
|
||||
log.error(`Failed to initialize Perplexity client: ${error.message}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get model configuration from session environment or fall back to defaults
|
||||
* @param {Object} [session] - Session object from MCP containing environment variables
|
||||
* @param {Object} [defaults] - Default model configuration to use if not in session
|
||||
* @returns {Object} Model configuration with model, maxTokens, and temperature
|
||||
*/
|
||||
export function getModelConfig(session, defaults = DEFAULT_MODEL_CONFIG) {
|
||||
// Get values from session or fall back to defaults
|
||||
return {
|
||||
model: session?.env?.MODEL || defaults.model,
|
||||
maxTokens: parseInt(session?.env?.MAX_TOKENS || defaults.maxTokens),
|
||||
temperature: parseFloat(session?.env?.TEMPERATURE || defaults.temperature)
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the best available AI model based on specified options
|
||||
* @param {Object} session - Session object from MCP containing environment variables
|
||||
* @param {Object} options - Options for model selection
|
||||
* @param {boolean} [options.requiresResearch=false] - Whether the operation requires research capabilities
|
||||
* @param {boolean} [options.claudeOverloaded=false] - Whether Claude is currently overloaded
|
||||
* @param {Object} [log] - Logger object to use (defaults to console)
|
||||
* @returns {Promise<Object>} Selected model info with type and client
|
||||
* @throws {Error} If no AI models are available
|
||||
*/
|
||||
export async function getBestAvailableAIModel(session, options = {}, log = console) {
|
||||
const { requiresResearch = false, claudeOverloaded = false } = options;
|
||||
|
||||
// Test case: When research is needed but no Perplexity, use Claude
|
||||
if (requiresResearch &&
|
||||
!(session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY) &&
|
||||
(session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)) {
|
||||
try {
|
||||
log.warn('Perplexity not available for research, using Claude');
|
||||
const client = getAnthropicClientForMCP(session, log);
|
||||
return { type: 'claude', client };
|
||||
} catch (error) {
|
||||
log.error(`Claude not available: ${error.message}`);
|
||||
throw new Error('No AI models available for research');
|
||||
}
|
||||
}
|
||||
|
||||
// Regular path: Perplexity for research when available
|
||||
if (requiresResearch && (session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY)) {
|
||||
try {
|
||||
const client = await getPerplexityClientForMCP(session, log);
|
||||
return { type: 'perplexity', client };
|
||||
} catch (error) {
|
||||
log.warn(`Perplexity not available: ${error.message}`);
|
||||
// Fall through to Claude as backup
|
||||
}
|
||||
}
|
||||
|
||||
// Test case: Claude for overloaded scenario
|
||||
if (claudeOverloaded && (session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)) {
|
||||
try {
|
||||
log.warn('Claude is overloaded but no alternatives are available. Proceeding with Claude anyway.');
|
||||
const client = getAnthropicClientForMCP(session, log);
|
||||
return { type: 'claude', client };
|
||||
} catch (error) {
|
||||
log.error(`Claude not available despite being overloaded: ${error.message}`);
|
||||
throw new Error('No AI models available');
|
||||
}
|
||||
}
|
||||
|
||||
// Default case: Use Claude when available and not overloaded
|
||||
if (!claudeOverloaded && (session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)) {
|
||||
try {
|
||||
const client = getAnthropicClientForMCP(session, log);
|
||||
return { type: 'claude', client };
|
||||
} catch (error) {
|
||||
log.warn(`Claude not available: ${error.message}`);
|
||||
// Fall through to error if no other options
|
||||
}
|
||||
}
|
||||
|
||||
// If we got here, no models were successfully initialized
|
||||
throw new Error('No AI models available. Please check your API keys.');
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle Claude API errors with user-friendly messages
|
||||
* @param {Error} error - The error from Claude API
|
||||
* @returns {string} User-friendly error message
|
||||
*/
|
||||
export function handleClaudeError(error) {
|
||||
// Check if it's a structured error response
|
||||
if (error.type === 'error' && error.error) {
|
||||
switch (error.error.type) {
|
||||
case 'overloaded_error':
|
||||
return 'Claude is currently experiencing high demand and is overloaded. Please wait a few minutes and try again.';
|
||||
case 'rate_limit_error':
|
||||
return 'You have exceeded the rate limit. Please wait a few minutes before making more requests.';
|
||||
case 'invalid_request_error':
|
||||
return 'There was an issue with the request format. If this persists, please report it as a bug.';
|
||||
default:
|
||||
return `Claude API error: ${error.error.message}`;
|
||||
}
|
||||
}
|
||||
|
||||
// Check for network/timeout errors
|
||||
if (error.message?.toLowerCase().includes('timeout')) {
|
||||
return 'The request to Claude timed out. Please try again.';
|
||||
}
|
||||
if (error.message?.toLowerCase().includes('network')) {
|
||||
return 'There was a network error connecting to Claude. Please check your internet connection and try again.';
|
||||
}
|
||||
|
||||
// Default error message
|
||||
return `Error communicating with Claude: ${error.message}`;
|
||||
}
|
||||
217
mcp-server/src/core/utils/async-manager.js
Normal file
217
mcp-server/src/core/utils/async-manager.js
Normal file
@@ -0,0 +1,217 @@
|
||||
import { v4 as uuidv4 } from 'uuid';
|
||||
|
||||
class AsyncOperationManager {
|
||||
constructor() {
|
||||
this.operations = new Map(); // Stores active operation state
|
||||
this.completedOperations = new Map(); // Stores completed operations
|
||||
this.maxCompletedOperations = 100; // Maximum number of completed operations to store
|
||||
this.listeners = new Map(); // For potential future notifications
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds an operation to be executed asynchronously.
|
||||
* @param {Function} operationFn - The async function to execute (e.g., a Direct function).
|
||||
* @param {Object} args - Arguments to pass to the operationFn.
|
||||
* @param {Object} context - The MCP tool context { log, reportProgress, session }.
|
||||
* @returns {string} The unique ID assigned to this operation.
|
||||
*/
|
||||
addOperation(operationFn, args, context) {
|
||||
const operationId = `op-${uuidv4()}`;
|
||||
const operation = {
|
||||
id: operationId,
|
||||
status: 'pending',
|
||||
startTime: Date.now(),
|
||||
endTime: null,
|
||||
result: null,
|
||||
error: null,
|
||||
// Store necessary parts of context, especially log for background execution
|
||||
log: context.log,
|
||||
reportProgress: context.reportProgress, // Pass reportProgress through
|
||||
session: context.session // Pass session through if needed by the operationFn
|
||||
};
|
||||
this.operations.set(operationId, operation);
|
||||
this.log(operationId, 'info', `Operation added.`);
|
||||
|
||||
// Start execution in the background (don't await here)
|
||||
this._runOperation(operationId, operationFn, args, context).catch(err => {
|
||||
// Catch unexpected errors during the async execution setup itself
|
||||
this.log(operationId, 'error', `Critical error starting operation: ${err.message}`, { stack: err.stack });
|
||||
operation.status = 'failed';
|
||||
operation.error = { code: 'MANAGER_EXECUTION_ERROR', message: err.message };
|
||||
operation.endTime = Date.now();
|
||||
|
||||
// Move to completed operations
|
||||
this._moveToCompleted(operationId);
|
||||
});
|
||||
|
||||
return operationId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal function to execute the operation.
|
||||
* @param {string} operationId - The ID of the operation.
|
||||
* @param {Function} operationFn - The async function to execute.
|
||||
* @param {Object} args - Arguments for the function.
|
||||
* @param {Object} context - The original MCP tool context.
|
||||
*/
|
||||
async _runOperation(operationId, operationFn, args, context) {
|
||||
const operation = this.operations.get(operationId);
|
||||
if (!operation) return; // Should not happen
|
||||
|
||||
operation.status = 'running';
|
||||
this.log(operationId, 'info', `Operation running.`);
|
||||
this.emit('statusChanged', { operationId, status: 'running' });
|
||||
|
||||
try {
|
||||
// Pass the necessary context parts to the direct function
|
||||
// The direct function needs to be adapted if it needs reportProgress
|
||||
// We pass the original context's log, plus our wrapped reportProgress
|
||||
const result = await operationFn(args, operation.log, {
|
||||
reportProgress: (progress) => this._handleProgress(operationId, progress),
|
||||
mcpLog: operation.log, // Pass log as mcpLog if direct fn expects it
|
||||
session: operation.session
|
||||
});
|
||||
|
||||
operation.status = result.success ? 'completed' : 'failed';
|
||||
operation.result = result.success ? result.data : null;
|
||||
operation.error = result.success ? null : result.error;
|
||||
this.log(operationId, 'info', `Operation finished with status: ${operation.status}`);
|
||||
|
||||
} catch (error) {
|
||||
this.log(operationId, 'error', `Operation failed with error: ${error.message}`, { stack: error.stack });
|
||||
operation.status = 'failed';
|
||||
operation.error = { code: 'OPERATION_EXECUTION_ERROR', message: error.message };
|
||||
} finally {
|
||||
operation.endTime = Date.now();
|
||||
this.emit('statusChanged', { operationId, status: operation.status, result: operation.result, error: operation.error });
|
||||
|
||||
// Move to completed operations if done or failed
|
||||
if (operation.status === 'completed' || operation.status === 'failed') {
|
||||
this._moveToCompleted(operationId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Move an operation from active operations to completed operations history.
|
||||
* @param {string} operationId - The ID of the operation to move.
|
||||
* @private
|
||||
*/
|
||||
_moveToCompleted(operationId) {
|
||||
const operation = this.operations.get(operationId);
|
||||
if (!operation) return;
|
||||
|
||||
// Store only the necessary data in completed operations
|
||||
const completedData = {
|
||||
id: operation.id,
|
||||
status: operation.status,
|
||||
startTime: operation.startTime,
|
||||
endTime: operation.endTime,
|
||||
result: operation.result,
|
||||
error: operation.error,
|
||||
};
|
||||
|
||||
this.completedOperations.set(operationId, completedData);
|
||||
this.operations.delete(operationId);
|
||||
|
||||
// Trim completed operations if exceeding maximum
|
||||
if (this.completedOperations.size > this.maxCompletedOperations) {
|
||||
// Get the oldest operation (sorted by endTime)
|
||||
const oldest = [...this.completedOperations.entries()]
|
||||
.sort((a, b) => a[1].endTime - b[1].endTime)[0];
|
||||
|
||||
if (oldest) {
|
||||
this.completedOperations.delete(oldest[0]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handles progress updates from the running operation and forwards them.
|
||||
* @param {string} operationId - The ID of the operation reporting progress.
|
||||
* @param {Object} progress - The progress object { progress, total? }.
|
||||
*/
|
||||
_handleProgress(operationId, progress) {
|
||||
const operation = this.operations.get(operationId);
|
||||
if (operation && operation.reportProgress) {
|
||||
try {
|
||||
// Use the reportProgress function captured from the original context
|
||||
operation.reportProgress(progress);
|
||||
this.log(operationId, 'debug', `Reported progress: ${JSON.stringify(progress)}`);
|
||||
} catch(err) {
|
||||
this.log(operationId, 'warn', `Failed to report progress: ${err.message}`);
|
||||
// Don't stop the operation, just log the reporting failure
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the status and result/error of an operation.
|
||||
* @param {string} operationId - The ID of the operation.
|
||||
* @returns {Object | null} The operation details or null if not found.
|
||||
*/
|
||||
getStatus(operationId) {
|
||||
// First check active operations
|
||||
const operation = this.operations.get(operationId);
|
||||
if (operation) {
|
||||
return {
|
||||
id: operation.id,
|
||||
status: operation.status,
|
||||
startTime: operation.startTime,
|
||||
endTime: operation.endTime,
|
||||
result: operation.result,
|
||||
error: operation.error,
|
||||
};
|
||||
}
|
||||
|
||||
// Then check completed operations
|
||||
const completedOperation = this.completedOperations.get(operationId);
|
||||
if (completedOperation) {
|
||||
return completedOperation;
|
||||
}
|
||||
|
||||
// Operation not found in either active or completed
|
||||
return {
|
||||
error: {
|
||||
code: 'OPERATION_NOT_FOUND',
|
||||
message: `Operation ID ${operationId} not found. It may have been completed and removed from history, or the ID may be invalid.`
|
||||
},
|
||||
status: 'not_found'
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal logging helper to prefix logs with the operation ID.
|
||||
* @param {string} operationId - The ID of the operation.
|
||||
* @param {'info'|'warn'|'error'|'debug'} level - Log level.
|
||||
* @param {string} message - Log message.
|
||||
* @param {Object} [meta] - Additional metadata.
|
||||
*/
|
||||
log(operationId, level, message, meta = {}) {
|
||||
const operation = this.operations.get(operationId);
|
||||
// Use the logger instance associated with the operation if available, otherwise console
|
||||
const logger = operation?.log || console;
|
||||
const logFn = logger[level] || logger.log || console.log; // Fallback
|
||||
logFn(`[AsyncOp ${operationId}] ${message}`, meta);
|
||||
}
|
||||
|
||||
// --- Basic Event Emitter ---
|
||||
on(eventName, listener) {
|
||||
if (!this.listeners.has(eventName)) {
|
||||
this.listeners.set(eventName, []);
|
||||
}
|
||||
this.listeners.get(eventName).push(listener);
|
||||
}
|
||||
|
||||
emit(eventName, data) {
|
||||
if (this.listeners.has(eventName)) {
|
||||
this.listeners.get(eventName).forEach(listener => listener(data));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Export a singleton instance
|
||||
const asyncOperationManager = new AsyncOperationManager();
|
||||
|
||||
// Export the manager and potentially the class if needed elsewhere
|
||||
export { asyncOperationManager, AsyncOperationManager };
|
||||
43
mcp-server/src/core/utils/env-utils.js
Normal file
43
mcp-server/src/core/utils/env-utils.js
Normal file
@@ -0,0 +1,43 @@
|
||||
/**
|
||||
* Temporarily sets environment variables from session.env, executes an action,
|
||||
* and restores the original environment variables.
|
||||
* @param {object | undefined} sessionEnv - The environment object from the session.
|
||||
* @param {Function} actionFn - An async function to execute with the temporary environment.
|
||||
* @returns {Promise<any>} The result of the actionFn.
|
||||
*/
|
||||
export async function withSessionEnv(sessionEnv, actionFn) {
|
||||
if (!sessionEnv || typeof sessionEnv !== 'object' || Object.keys(sessionEnv).length === 0) {
|
||||
// If no sessionEnv is provided, just run the action directly
|
||||
return await actionFn();
|
||||
}
|
||||
|
||||
const originalEnv = {};
|
||||
const keysToRestore = [];
|
||||
|
||||
// Set environment variables from sessionEnv
|
||||
for (const key in sessionEnv) {
|
||||
if (Object.prototype.hasOwnProperty.call(sessionEnv, key)) {
|
||||
// Store original value if it exists, otherwise mark for deletion
|
||||
if (process.env[key] !== undefined) {
|
||||
originalEnv[key] = process.env[key];
|
||||
}
|
||||
keysToRestore.push(key);
|
||||
process.env[key] = sessionEnv[key];
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
// Execute the provided action function
|
||||
return await actionFn();
|
||||
} finally {
|
||||
// Restore original environment variables
|
||||
for (const key of keysToRestore) {
|
||||
if (Object.prototype.hasOwnProperty.call(originalEnv, key)) {
|
||||
process.env[key] = originalEnv[key];
|
||||
} else {
|
||||
// If the key didn't exist originally, delete it
|
||||
delete process.env[key];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
268
mcp-server/src/core/utils/path-utils.js
Normal file
268
mcp-server/src/core/utils/path-utils.js
Normal file
@@ -0,0 +1,268 @@
|
||||
/**
|
||||
* path-utils.js
|
||||
* Utility functions for file path operations in Task Master
|
||||
*
|
||||
* This module provides robust path resolution for both:
|
||||
* 1. PACKAGE PATH: Where task-master code is installed
|
||||
* (global node_modules OR local ./node_modules/task-master OR direct from repo)
|
||||
* 2. PROJECT PATH: Where user's tasks.json resides (typically user's project root)
|
||||
*/
|
||||
|
||||
import path from 'path';
|
||||
import fs from 'fs';
|
||||
import { fileURLToPath } from 'url';
|
||||
import os from 'os';
|
||||
|
||||
// Store last found project root to improve performance on subsequent calls (primarily for CLI)
|
||||
export let lastFoundProjectRoot = null;
|
||||
|
||||
// Project marker files that indicate a potential project root
|
||||
export const PROJECT_MARKERS = [
|
||||
// Task Master specific
|
||||
'tasks.json',
|
||||
'tasks/tasks.json',
|
||||
|
||||
// Common version control
|
||||
'.git',
|
||||
'.svn',
|
||||
|
||||
// Common package files
|
||||
'package.json',
|
||||
'pyproject.toml',
|
||||
'Gemfile',
|
||||
'go.mod',
|
||||
'Cargo.toml',
|
||||
|
||||
// Common IDE/editor folders
|
||||
'.cursor',
|
||||
'.vscode',
|
||||
'.idea',
|
||||
|
||||
// Common dependency directories (check if directory)
|
||||
'node_modules',
|
||||
'venv',
|
||||
'.venv',
|
||||
|
||||
// Common config files
|
||||
'.env',
|
||||
'.eslintrc',
|
||||
'tsconfig.json',
|
||||
'babel.config.js',
|
||||
'jest.config.js',
|
||||
'webpack.config.js',
|
||||
|
||||
// Common CI/CD files
|
||||
'.github/workflows',
|
||||
'.gitlab-ci.yml',
|
||||
'.circleci/config.yml'
|
||||
];
|
||||
|
||||
/**
|
||||
* Gets the path to the task-master package installation directory
|
||||
* NOTE: This might become unnecessary if CLI fallback in MCP utils is removed.
|
||||
* @returns {string} - Absolute path to the package installation directory
|
||||
*/
|
||||
export function getPackagePath() {
|
||||
// When running from source, __dirname is the directory containing this file
|
||||
// When running from npm, we need to find the package root
|
||||
const thisFilePath = fileURLToPath(import.meta.url);
|
||||
const thisFileDir = path.dirname(thisFilePath);
|
||||
|
||||
// Navigate from core/utils up to the package root
|
||||
// In dev: /path/to/task-master/mcp-server/src/core/utils -> /path/to/task-master
|
||||
// In npm: /path/to/node_modules/task-master/mcp-server/src/core/utils -> /path/to/node_modules/task-master
|
||||
return path.resolve(thisFileDir, '../../../../');
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds the absolute path to the tasks.json file based on project root and arguments.
|
||||
* @param {Object} args - Command arguments, potentially including 'projectRoot' and 'file'.
|
||||
* @param {Object} log - Logger object.
|
||||
* @returns {string} - Absolute path to the tasks.json file.
|
||||
* @throws {Error} - If tasks.json cannot be found.
|
||||
*/
|
||||
export function findTasksJsonPath(args, log) {
|
||||
// PRECEDENCE ORDER for finding tasks.json:
|
||||
// 1. Explicitly provided `projectRoot` in args (Highest priority, expected in MCP context)
|
||||
// 2. Previously found/cached `lastFoundProjectRoot` (primarily for CLI performance)
|
||||
// 3. Search upwards from current working directory (`process.cwd()`) - CLI usage
|
||||
|
||||
// 1. If project root is explicitly provided (e.g., from MCP session), use it directly
|
||||
if (args.projectRoot) {
|
||||
const projectRoot = args.projectRoot;
|
||||
log.info(`Using explicitly provided project root: ${projectRoot}`);
|
||||
try {
|
||||
// This will throw if tasks.json isn't found within this root
|
||||
return findTasksJsonInDirectory(projectRoot, args.file, log);
|
||||
} catch (error) {
|
||||
// Include debug info in error
|
||||
const debugInfo = {
|
||||
projectRoot,
|
||||
currentDir: process.cwd(),
|
||||
serverDir: path.dirname(process.argv[1]),
|
||||
possibleProjectRoot: path.resolve(path.dirname(process.argv[1]), '../..'),
|
||||
lastFoundProjectRoot,
|
||||
searchedPaths: error.message
|
||||
};
|
||||
|
||||
error.message = `Tasks file not found in any of the expected locations relative to project root "${projectRoot}" (from session).\nDebug Info: ${JSON.stringify(debugInfo, null, 2)}`;
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Fallback logic primarily for CLI or when projectRoot isn't passed ---
|
||||
|
||||
// 2. If we have a last known project root that worked, try it first
|
||||
if (lastFoundProjectRoot) {
|
||||
log.info(`Trying last known project root: ${lastFoundProjectRoot}`);
|
||||
try {
|
||||
// Use the cached root
|
||||
const tasksPath = findTasksJsonInDirectory(lastFoundProjectRoot, args.file, log);
|
||||
return tasksPath; // Return if found in cached root
|
||||
} catch (error) {
|
||||
log.info(`Task file not found in last known project root, continuing search.`);
|
||||
// Continue with search if not found in cache
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Start search from current directory (most common CLI scenario)
|
||||
const startDir = process.cwd();
|
||||
log.info(`Searching for tasks.json starting from current directory: ${startDir}`);
|
||||
|
||||
// Try to find tasks.json by walking up the directory tree from cwd
|
||||
try {
|
||||
// This will throw if not found in the CWD tree
|
||||
return findTasksJsonWithParentSearch(startDir, args.file, log);
|
||||
} catch (error) {
|
||||
// If all attempts fail, augment and throw the original error from CWD search
|
||||
error.message = `${error.message}\n\nPossible solutions:\n1. Run the command from your project directory containing tasks.json\n2. Use --project-root=/path/to/project to specify the project location (if using CLI)\n3. Ensure the project root is correctly passed from the client (if using MCP)\n\nCurrent working directory: ${startDir}\nLast known project root: ${lastFoundProjectRoot}\nProject root from args: ${args.projectRoot}`;
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a directory contains any project marker files or directories
|
||||
* @param {string} dirPath - Directory to check
|
||||
* @returns {boolean} - True if the directory contains any project markers
|
||||
*/
|
||||
function hasProjectMarkers(dirPath) {
|
||||
return PROJECT_MARKERS.some(marker => {
|
||||
const markerPath = path.join(dirPath, marker);
|
||||
// Check if the marker exists as either a file or directory
|
||||
return fs.existsSync(markerPath);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Search for tasks.json in a specific directory
|
||||
* @param {string} dirPath - Directory to search in
|
||||
* @param {string} explicitFilePath - Optional explicit file path relative to dirPath
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {string} - Absolute path to tasks.json
|
||||
* @throws {Error} - If tasks.json cannot be found
|
||||
*/
|
||||
function findTasksJsonInDirectory(dirPath, explicitFilePath, log) {
|
||||
const possiblePaths = [];
|
||||
|
||||
// 1. If a file is explicitly provided relative to dirPath
|
||||
if (explicitFilePath) {
|
||||
possiblePaths.push(path.resolve(dirPath, explicitFilePath));
|
||||
}
|
||||
|
||||
// 2. Check the standard locations relative to dirPath
|
||||
possiblePaths.push(
|
||||
path.join(dirPath, 'tasks.json'),
|
||||
path.join(dirPath, 'tasks', 'tasks.json')
|
||||
);
|
||||
|
||||
log.info(`Checking potential task file paths: ${possiblePaths.join(', ')}`);
|
||||
|
||||
// Find the first existing path
|
||||
for (const p of possiblePaths) {
|
||||
if (fs.existsSync(p)) {
|
||||
log.info(`Found tasks file at: ${p}`);
|
||||
// Store the project root for future use
|
||||
lastFoundProjectRoot = dirPath;
|
||||
return p;
|
||||
}
|
||||
}
|
||||
|
||||
// If no file was found, throw an error
|
||||
const error = new Error(`Tasks file not found in any of the expected locations relative to ${dirPath}: ${possiblePaths.join(', ')}`);
|
||||
error.code = 'TASKS_FILE_NOT_FOUND';
|
||||
throw error;
|
||||
}
|
||||
|
||||
/**
|
||||
* Recursively search for tasks.json in the given directory and parent directories
|
||||
* Also looks for project markers to identify potential project roots
|
||||
* @param {string} startDir - Directory to start searching from
|
||||
* @param {string} explicitFilePath - Optional explicit file path
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {string} - Absolute path to tasks.json
|
||||
* @throws {Error} - If tasks.json cannot be found in any parent directory
|
||||
*/
|
||||
function findTasksJsonWithParentSearch(startDir, explicitFilePath, log) {
|
||||
let currentDir = startDir;
|
||||
const rootDir = path.parse(currentDir).root;
|
||||
|
||||
// Keep traversing up until we hit the root directory
|
||||
while (currentDir !== rootDir) {
|
||||
// First check for tasks.json directly
|
||||
try {
|
||||
return findTasksJsonInDirectory(currentDir, explicitFilePath, log);
|
||||
} catch (error) {
|
||||
// If tasks.json not found but the directory has project markers,
|
||||
// log it as a potential project root (helpful for debugging)
|
||||
if (hasProjectMarkers(currentDir)) {
|
||||
log.info(`Found project markers in ${currentDir}, but no tasks.json`);
|
||||
}
|
||||
|
||||
// Move up to parent directory
|
||||
const parentDir = path.dirname(currentDir);
|
||||
|
||||
// Check if we've reached the root
|
||||
if (parentDir === currentDir) {
|
||||
break;
|
||||
}
|
||||
|
||||
log.info(`Tasks file not found in ${currentDir}, searching in parent directory: ${parentDir}`);
|
||||
currentDir = parentDir;
|
||||
}
|
||||
}
|
||||
|
||||
// If we've searched all the way to the root and found nothing
|
||||
const error = new Error(`Tasks file not found in ${startDir} or any parent directory.`);
|
||||
error.code = 'TASKS_FILE_NOT_FOUND';
|
||||
throw error;
|
||||
}
|
||||
|
||||
// Note: findTasksWithNpmConsideration is not used by findTasksJsonPath and might be legacy or used elsewhere.
|
||||
// If confirmed unused, it could potentially be removed in a separate cleanup.
|
||||
function findTasksWithNpmConsideration(startDir, log) {
|
||||
// First try our recursive parent search from cwd
|
||||
try {
|
||||
return findTasksJsonWithParentSearch(startDir, null, log);
|
||||
} catch (error) {
|
||||
// If that fails, try looking relative to the executable location
|
||||
const execPath = process.argv[1];
|
||||
const execDir = path.dirname(execPath);
|
||||
log.info(`Looking for tasks file relative to executable at: ${execDir}`);
|
||||
|
||||
try {
|
||||
return findTasksJsonWithParentSearch(execDir, null, log);
|
||||
} catch (secondError) {
|
||||
// If that also fails, check standard locations in user's home directory
|
||||
const homeDir = os.homedir();
|
||||
log.info(`Looking for tasks file in home directory: ${homeDir}`);
|
||||
|
||||
try {
|
||||
// Check standard locations in home dir
|
||||
return findTasksJsonInDirectory(path.join(homeDir, '.task-master'), null, log);
|
||||
} catch (thirdError) {
|
||||
// If all approaches fail, throw the original error
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5,6 +5,7 @@ import { fileURLToPath } from "url";
|
||||
import fs from "fs";
|
||||
import logger from "./logger.js";
|
||||
import { registerTaskMasterTools } from "./tools/index.js";
|
||||
import { asyncOperationManager } from './core/utils/async-manager.js';
|
||||
|
||||
// Load environment variables
|
||||
dotenv.config();
|
||||
@@ -30,9 +31,12 @@ class TaskMasterMCPServer {
|
||||
this.server = new FastMCP(this.options);
|
||||
this.initialized = false;
|
||||
|
||||
// this.server.addResource({});
|
||||
this.server.addResource({});
|
||||
|
||||
// this.server.addResourceTemplate({});
|
||||
this.server.addResourceTemplate({});
|
||||
|
||||
// Make the manager accessible (e.g., pass it to tool registration)
|
||||
this.asyncManager = asyncOperationManager;
|
||||
|
||||
// Bind methods
|
||||
this.init = this.init.bind(this);
|
||||
@@ -49,8 +53,8 @@ class TaskMasterMCPServer {
|
||||
async init() {
|
||||
if (this.initialized) return;
|
||||
|
||||
// Register Task Master tools
|
||||
registerTaskMasterTools(this.server);
|
||||
// Pass the manager instance to the tool registration function
|
||||
registerTaskMasterTools(this.server, this.asyncManager);
|
||||
|
||||
this.initialized = true;
|
||||
|
||||
@@ -83,4 +87,7 @@ class TaskMasterMCPServer {
|
||||
}
|
||||
}
|
||||
|
||||
// Export the manager from here as well, if needed elsewhere
|
||||
export { asyncOperationManager };
|
||||
|
||||
export default TaskMasterMCPServer;
|
||||
|
||||
@@ -11,7 +11,7 @@ const LOG_LEVELS = {
|
||||
|
||||
// Get log level from environment or default to info
|
||||
const LOG_LEVEL = process.env.LOG_LEVEL
|
||||
? LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()]
|
||||
? LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()] ?? LOG_LEVELS.info
|
||||
: LOG_LEVELS.info;
|
||||
|
||||
/**
|
||||
@@ -20,43 +20,66 @@ const LOG_LEVEL = process.env.LOG_LEVEL
|
||||
* @param {...any} args - Arguments to log
|
||||
*/
|
||||
function log(level, ...args) {
|
||||
const icons = {
|
||||
debug: chalk.gray("🔍"),
|
||||
info: chalk.blue("ℹ️"),
|
||||
warn: chalk.yellow("⚠️"),
|
||||
error: chalk.red("❌"),
|
||||
success: chalk.green("✅"),
|
||||
// Use text prefixes instead of emojis
|
||||
const prefixes = {
|
||||
debug: chalk.gray("[DEBUG]"),
|
||||
info: chalk.blue("[INFO]"),
|
||||
warn: chalk.yellow("[WARN]"),
|
||||
error: chalk.red("[ERROR]"),
|
||||
success: chalk.green("[SUCCESS]"),
|
||||
};
|
||||
|
||||
if (LOG_LEVELS[level] >= LOG_LEVEL) {
|
||||
const icon = icons[level] || "";
|
||||
if (LOG_LEVELS[level] !== undefined && LOG_LEVELS[level] >= LOG_LEVEL) {
|
||||
const prefix = prefixes[level] || "";
|
||||
let coloredArgs = args;
|
||||
|
||||
if (level === "error") {
|
||||
console.error(icon, chalk.red(...args));
|
||||
} else if (level === "warn") {
|
||||
console.warn(icon, chalk.yellow(...args));
|
||||
} else if (level === "success") {
|
||||
console.log(icon, chalk.green(...args));
|
||||
} else if (level === "info") {
|
||||
console.log(icon, chalk.blue(...args));
|
||||
} else {
|
||||
console.log(icon, ...args);
|
||||
try {
|
||||
switch(level) {
|
||||
case "error":
|
||||
coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.red(arg) : arg);
|
||||
break;
|
||||
case "warn":
|
||||
coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.yellow(arg) : arg);
|
||||
break;
|
||||
case "success":
|
||||
coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.green(arg) : arg);
|
||||
break;
|
||||
case "info":
|
||||
coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.blue(arg) : arg);
|
||||
break;
|
||||
case "debug":
|
||||
coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.gray(arg) : arg);
|
||||
break;
|
||||
// default: use original args (no color)
|
||||
}
|
||||
} catch (colorError) {
|
||||
// Fallback if chalk fails on an argument
|
||||
// Use console.error here for internal logger errors, separate from normal logging
|
||||
console.error("Internal Logger Error applying chalk color:", colorError);
|
||||
coloredArgs = args;
|
||||
}
|
||||
|
||||
// Revert to console.log - FastMCP's context logger (context.log)
|
||||
// is responsible for directing logs correctly (e.g., to stderr)
|
||||
// during tool execution without upsetting the client connection.
|
||||
// Logs outside of tool execution (like startup) will go to stdout.
|
||||
console.log(prefix, ...coloredArgs);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a logger object with methods for different log levels
|
||||
* Can be used as a drop-in replacement for existing logger initialization
|
||||
* @returns {Object} Logger object with info, error, debug, warn, and success methods
|
||||
*/
|
||||
export function createLogger() {
|
||||
const createLogMethod = (level) => (...args) => log(level, ...args);
|
||||
|
||||
return {
|
||||
debug: (message) => log("debug", message),
|
||||
info: (message) => log("info", message),
|
||||
warn: (message) => log("warn", message),
|
||||
error: (message) => log("error", message),
|
||||
success: (message) => log("success", message),
|
||||
debug: createLogMethod("debug"),
|
||||
info: createLogMethod("info"),
|
||||
warn: createLogMethod("warn"),
|
||||
error: createLogMethod("error"),
|
||||
success: createLogMethod("success"),
|
||||
log: log, // Also expose the raw log function
|
||||
};
|
||||
}
|
||||
|
||||
65
mcp-server/src/tools/add-dependency.js
Normal file
65
mcp-server/src/tools/add-dependency.js
Normal file
@@ -0,0 +1,65 @@
|
||||
/**
|
||||
* tools/add-dependency.js
|
||||
* Tool for adding a dependency to a task
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { addDependencyDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the addDependency tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerAddDependencyTool(server) {
|
||||
server.addTool({
|
||||
name: "add_dependency",
|
||||
description: "Add a dependency relationship between two tasks",
|
||||
parameters: z.object({
|
||||
id: z.string().describe("ID of task that will depend on another task"),
|
||||
dependsOn: z.string().describe("ID of task that will become a dependency"),
|
||||
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
|
||||
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Adding dependency for task ${args.id} to depend on ${args.dependsOn}`);
|
||||
reportProgress({ progress: 0 });
|
||||
|
||||
// Get project root using the utility function
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
// Fallback to args.projectRoot if session didn't provide one
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
// Call the direct function with the resolved rootFolder
|
||||
const result = await addDependencyDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log, { reportProgress, mcpLog: log, session});
|
||||
|
||||
reportProgress({ progress: 100 });
|
||||
|
||||
// Log result
|
||||
if (result.success) {
|
||||
log.info(`Successfully added dependency: ${result.data.message}`);
|
||||
} else {
|
||||
log.error(`Failed to add dependency: ${result.error.message}`);
|
||||
}
|
||||
|
||||
// Use handleApiResult to format the response
|
||||
return handleApiResult(result, log, 'Error adding dependency');
|
||||
} catch (error) {
|
||||
log.error(`Error in addDependency tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
63
mcp-server/src/tools/add-subtask.js
Normal file
63
mcp-server/src/tools/add-subtask.js
Normal file
@@ -0,0 +1,63 @@
|
||||
/**
|
||||
* tools/add-subtask.js
|
||||
* Tool for adding subtasks to existing tasks
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { addSubtaskDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the addSubtask tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerAddSubtaskTool(server) {
|
||||
server.addTool({
|
||||
name: "add_subtask",
|
||||
description: "Add a subtask to an existing task",
|
||||
parameters: z.object({
|
||||
id: z.string().describe("Parent task ID (required)"),
|
||||
taskId: z.string().optional().describe("Existing task ID to convert to subtask"),
|
||||
title: z.string().optional().describe("Title for the new subtask (when creating a new subtask)"),
|
||||
description: z.string().optional().describe("Description for the new subtask"),
|
||||
details: z.string().optional().describe("Implementation details for the new subtask"),
|
||||
status: z.string().optional().describe("Status for the new subtask (default: 'pending')"),
|
||||
dependencies: z.string().optional().describe("Comma-separated list of dependency IDs for the new subtask"),
|
||||
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
|
||||
skipGenerate: z.boolean().optional().describe("Skip regenerating task files"),
|
||||
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Adding subtask with args: ${JSON.stringify(args)}`);
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await addSubtaskDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log, { reportProgress, mcpLog: log, session});
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Subtask added successfully: ${result.data.message}`);
|
||||
} else {
|
||||
log.error(`Failed to add subtask: ${result.error.message}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error adding subtask');
|
||||
} catch (error) {
|
||||
log.error(`Error in addSubtask tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
66
mcp-server/src/tools/add-task.js
Normal file
66
mcp-server/src/tools/add-task.js
Normal file
@@ -0,0 +1,66 @@
|
||||
/**
|
||||
* tools/add-task.js
|
||||
* Tool to add a new task using AI
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
createContentResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { addTaskDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the add-task tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
* @param {AsyncOperationManager} asyncManager - The async operation manager instance.
|
||||
*/
|
||||
export function registerAddTaskTool(server, asyncManager) {
|
||||
server.addTool({
|
||||
name: "add_task",
|
||||
description: "Starts adding a new task using AI in the background.",
|
||||
parameters: z.object({
|
||||
prompt: z.string().describe("Description of the task to add"),
|
||||
dependencies: z.string().optional().describe("Comma-separated list of task IDs this task depends on"),
|
||||
priority: z.string().optional().describe("Task priority (high, medium, low)"),
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
|
||||
}),
|
||||
execute: async (args, context) => {
|
||||
const { log, reportProgress, session } = context;
|
||||
try {
|
||||
log.info(`MCP add_task request received with prompt: \"${args.prompt}\"`);
|
||||
|
||||
if (!args.prompt) {
|
||||
return createErrorResponse("Prompt is required for add_task.", "VALIDATION_ERROR");
|
||||
}
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const directArgs = {
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
};
|
||||
|
||||
const operationId = asyncManager.addOperation(addTaskDirect, directArgs, context);
|
||||
|
||||
log.info(`Started background operation for add_task. Operation ID: ${operationId}`);
|
||||
|
||||
return createContentResponse({
|
||||
message: "Add task operation started successfully.",
|
||||
operationId: operationId
|
||||
});
|
||||
|
||||
} catch (error) {
|
||||
log.error(`Error initiating add_task operation: ${error.message}`, { stack: error.stack });
|
||||
return createErrorResponse(`Failed to start add task operation: ${error.message}`, "ADD_TASK_INIT_ERROR");
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
/**
|
||||
* tools/addTask.js
|
||||
* Tool to add a new task using AI
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
executeTaskMasterCommand,
|
||||
createContentResponse,
|
||||
createErrorResponse,
|
||||
} from "./utils.js";
|
||||
|
||||
/**
|
||||
* Register the addTask tool with the MCP server
|
||||
* @param {FastMCP} server - FastMCP server instance
|
||||
*/
|
||||
export function registerAddTaskTool(server) {
|
||||
server.addTool({
|
||||
name: "addTask",
|
||||
description: "Add a new task using AI",
|
||||
parameters: z.object({
|
||||
prompt: z.string().describe("Description of the task to add"),
|
||||
dependencies: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe("Comma-separated list of task IDs this task depends on"),
|
||||
priority: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe("Task priority (high, medium, low)"),
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log }) => {
|
||||
try {
|
||||
log.info(`Adding new task: ${args.prompt}`);
|
||||
|
||||
const cmdArgs = [`--prompt="${args.prompt}"`];
|
||||
if (args.dependencies)
|
||||
cmdArgs.push(`--dependencies=${args.dependencies}`);
|
||||
if (args.priority) cmdArgs.push(`--priority=${args.priority}`);
|
||||
if (args.file) cmdArgs.push(`--file=${args.file}`);
|
||||
|
||||
const result = executeTaskMasterCommand(
|
||||
"add-task",
|
||||
log,
|
||||
cmdArgs,
|
||||
projectRoot
|
||||
);
|
||||
|
||||
if (!result.success) {
|
||||
throw new Error(result.error);
|
||||
}
|
||||
|
||||
return createContentResponse(result.stdout);
|
||||
} catch (error) {
|
||||
log.error(`Error adding task: ${error.message}`);
|
||||
return createErrorResponse(`Error adding task: ${error.message}`);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
63
mcp-server/src/tools/analyze.js
Normal file
63
mcp-server/src/tools/analyze.js
Normal file
@@ -0,0 +1,63 @@
|
||||
/**
|
||||
* tools/analyze.js
|
||||
* Tool for analyzing task complexity and generating recommendations
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { analyzeTaskComplexityDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the analyze tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerAnalyzeTool(server) {
|
||||
server.addTool({
|
||||
name: "analyze_project_complexity",
|
||||
description: "Analyze task complexity and generate expansion recommendations",
|
||||
parameters: z.object({
|
||||
output: z.string().optional().describe("Output file path for the report (default: scripts/task-complexity-report.json)"),
|
||||
model: z.string().optional().describe("LLM model to use for analysis (defaults to configured model)"),
|
||||
threshold: z.union([z.number(), z.string()]).optional().describe("Minimum complexity score to recommend expansion (1-10) (default: 5)"),
|
||||
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
|
||||
research: z.boolean().optional().describe("Use Perplexity AI for research-backed complexity analysis"),
|
||||
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Analyzing task complexity with args: ${JSON.stringify(args)}`);
|
||||
// await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await analyzeTaskComplexityDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Task complexity analysis complete: ${result.data.message}`);
|
||||
log.info(`Report summary: ${JSON.stringify(result.data.reportSummary)}`);
|
||||
} else {
|
||||
log.error(`Failed to analyze task complexity: ${result.error.message}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error analyzing task complexity');
|
||||
} catch (error) {
|
||||
log.error(`Error in analyze tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
63
mcp-server/src/tools/clear-subtasks.js
Normal file
63
mcp-server/src/tools/clear-subtasks.js
Normal file
@@ -0,0 +1,63 @@
|
||||
/**
|
||||
* tools/clear-subtasks.js
|
||||
* Tool for clearing subtasks from parent tasks
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { clearSubtasksDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the clearSubtasks tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerClearSubtasksTool(server) {
|
||||
server.addTool({
|
||||
name: "clear_subtasks",
|
||||
description: "Clear subtasks from specified tasks",
|
||||
parameters: z.object({
|
||||
id: z.string().optional().describe("Task IDs (comma-separated) to clear subtasks from"),
|
||||
all: z.boolean().optional().describe("Clear subtasks from all tasks"),
|
||||
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
|
||||
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
|
||||
}).refine(data => data.id || data.all, {
|
||||
message: "Either 'id' or 'all' parameter must be provided",
|
||||
path: ["id", "all"]
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);
|
||||
await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await clearSubtasksDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log, { reportProgress, mcpLog: log, session});
|
||||
|
||||
reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Subtasks cleared successfully: ${result.data.message}`);
|
||||
} else {
|
||||
log.error(`Failed to clear subtasks: ${result.error.message}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error clearing subtasks');
|
||||
} catch (error) {
|
||||
log.error(`Error in clearSubtasks tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
58
mcp-server/src/tools/complexity-report.js
Normal file
58
mcp-server/src/tools/complexity-report.js
Normal file
@@ -0,0 +1,58 @@
|
||||
/**
|
||||
* tools/complexity-report.js
|
||||
* Tool for displaying the complexity analysis report
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { complexityReportDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the complexityReport tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerComplexityReportTool(server) {
|
||||
server.addTool({
|
||||
name: "complexity_report",
|
||||
description: "Display the complexity analysis report in a readable format",
|
||||
parameters: z.object({
|
||||
file: z.string().optional().describe("Path to the report file (default: scripts/task-complexity-report.json)"),
|
||||
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Getting complexity report with args: ${JSON.stringify(args)}`);
|
||||
// await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await complexityReportDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully retrieved complexity report${result.fromCache ? ' (from cache)' : ''}`);
|
||||
} else {
|
||||
log.error(`Failed to retrieve complexity report: ${result.error.message}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error retrieving complexity report');
|
||||
} catch (error) {
|
||||
log.error(`Error in complexity-report tool: ${error.message}`);
|
||||
return createErrorResponse(`Failed to retrieve complexity report: ${error.message}`);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
62
mcp-server/src/tools/expand-all.js
Normal file
62
mcp-server/src/tools/expand-all.js
Normal file
@@ -0,0 +1,62 @@
|
||||
/**
|
||||
* tools/expand-all.js
|
||||
* Tool for expanding all pending tasks with subtasks
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { expandAllTasksDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the expandAll tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerExpandAllTool(server) {
|
||||
server.addTool({
|
||||
name: "expand_all",
|
||||
description: "Expand all pending tasks into subtasks",
|
||||
parameters: z.object({
|
||||
num: z.union([z.number(), z.string()]).optional().describe("Number of subtasks to generate for each task"),
|
||||
research: z.boolean().optional().describe("Enable Perplexity AI for research-backed subtask generation"),
|
||||
prompt: z.string().optional().describe("Additional context to guide subtask generation"),
|
||||
force: z.boolean().optional().describe("Force regeneration of subtasks for tasks that already have them"),
|
||||
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
|
||||
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Expanding all tasks with args: ${JSON.stringify(args)}`);
|
||||
// await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await expandAllTasksDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully expanded all tasks: ${result.data.message}`);
|
||||
} else {
|
||||
log.error(`Failed to expand all tasks: ${result.error?.message || 'Unknown error'}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error expanding all tasks');
|
||||
} catch (error) {
|
||||
log.error(`Error in expand-all tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
68
mcp-server/src/tools/expand-task.js
Normal file
68
mcp-server/src/tools/expand-task.js
Normal file
@@ -0,0 +1,68 @@
|
||||
/**
|
||||
* tools/expand-task.js
|
||||
* Tool to expand a task into subtasks
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { expandTaskDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the expand-task tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerExpandTaskTool(server) {
|
||||
server.addTool({
|
||||
name: "expand_task",
|
||||
description: "Expand a task into subtasks for detailed implementation",
|
||||
parameters: z.object({
|
||||
id: z.string().describe("ID of task to expand"),
|
||||
num: z.union([z.number(), z.string()]).optional().describe("Number of subtasks to generate"),
|
||||
research: z.boolean().optional().describe("Use Perplexity AI for research-backed generation"),
|
||||
prompt: z.string().optional().describe("Additional context for subtask generation"),
|
||||
force: z.boolean().optional().describe("Force regeneration even for tasks that already have subtasks"),
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Expanding task with args: ${JSON.stringify(args)}`);
|
||||
// await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await expandTaskDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully expanded task with ID ${args.id}`);
|
||||
} else {
|
||||
log.error(`Failed to expand task: ${result.error?.message || 'Unknown error'}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error expanding task');
|
||||
} catch (error) {
|
||||
log.error(`Error in expand task tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
/**
|
||||
* tools/expandTask.js
|
||||
* Tool to break down a task into detailed subtasks
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
executeTaskMasterCommand,
|
||||
createContentResponse,
|
||||
createErrorResponse,
|
||||
} from "./utils.js";
|
||||
|
||||
/**
|
||||
* Register the expandTask tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerExpandTaskTool(server) {
|
||||
server.addTool({
|
||||
name: "expandTask",
|
||||
description: "Break down a task into detailed subtasks",
|
||||
parameters: z.object({
|
||||
id: z.string().describe("Task ID to expand"),
|
||||
num: z.number().optional().describe("Number of subtasks to generate"),
|
||||
research: z
|
||||
.boolean()
|
||||
.optional()
|
||||
.describe(
|
||||
"Enable Perplexity AI for research-backed subtask generation"
|
||||
),
|
||||
prompt: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe("Additional context to guide subtask generation"),
|
||||
force: z
|
||||
.boolean()
|
||||
.optional()
|
||||
.describe(
|
||||
"Force regeneration of subtasks for tasks that already have them"
|
||||
),
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log }) => {
|
||||
try {
|
||||
log.info(`Expanding task ${args.id}`);
|
||||
|
||||
const cmdArgs = [`--id=${args.id}`];
|
||||
if (args.num) cmdArgs.push(`--num=${args.num}`);
|
||||
if (args.research) cmdArgs.push("--research");
|
||||
if (args.prompt) cmdArgs.push(`--prompt="${args.prompt}"`);
|
||||
if (args.force) cmdArgs.push("--force");
|
||||
if (args.file) cmdArgs.push(`--file=${args.file}`);
|
||||
|
||||
const projectRoot = args.projectRoot;
|
||||
|
||||
const result = executeTaskMasterCommand(
|
||||
"expand",
|
||||
log,
|
||||
cmdArgs,
|
||||
projectRoot
|
||||
);
|
||||
|
||||
if (!result.success) {
|
||||
throw new Error(result.error);
|
||||
}
|
||||
|
||||
return createContentResponse(result.stdout);
|
||||
} catch (error) {
|
||||
log.error(`Error expanding task: ${error.message}`);
|
||||
return createErrorResponse(`Error expanding task: ${error.message}`);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
58
mcp-server/src/tools/fix-dependencies.js
Normal file
58
mcp-server/src/tools/fix-dependencies.js
Normal file
@@ -0,0 +1,58 @@
|
||||
/**
|
||||
* tools/fix-dependencies.js
|
||||
* Tool for automatically fixing invalid task dependencies
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { fixDependenciesDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the fixDependencies tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerFixDependenciesTool(server) {
|
||||
server.addTool({
|
||||
name: "fix_dependencies",
|
||||
description: "Fix invalid dependencies in tasks automatically",
|
||||
parameters: z.object({
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Fixing dependencies with args: ${JSON.stringify(args)}`);
|
||||
await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await fixDependenciesDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log, { reportProgress, mcpLog: log, session});
|
||||
|
||||
await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully fixed dependencies: ${result.data.message}`);
|
||||
} else {
|
||||
log.error(`Failed to fix dependencies: ${result.error.message}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error fixing dependencies');
|
||||
} catch (error) {
|
||||
log.error(`Error in fixDependencies tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
64
mcp-server/src/tools/generate.js
Normal file
64
mcp-server/src/tools/generate.js
Normal file
@@ -0,0 +1,64 @@
|
||||
/**
|
||||
* tools/generate.js
|
||||
* Tool to generate individual task files from tasks.json
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { generateTaskFilesDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the generate tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerGenerateTool(server) {
|
||||
server.addTool({
|
||||
name: "generate",
|
||||
description: "Generates individual task files in tasks/ directory based on tasks.json",
|
||||
parameters: z.object({
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
output: z.string().optional().describe("Output directory (default: same directory as tasks file)"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Generating task files with args: ${JSON.stringify(args)}`);
|
||||
// await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await generateTaskFilesDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully generated task files: ${result.data.message}`);
|
||||
} else {
|
||||
log.error(`Failed to generate task files: ${result.error?.message || 'Unknown error'}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error generating task files');
|
||||
} catch (error) {
|
||||
log.error(`Error in generate tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
42
mcp-server/src/tools/get-operation-status.js
Normal file
42
mcp-server/src/tools/get-operation-status.js
Normal file
@@ -0,0 +1,42 @@
|
||||
// mcp-server/src/tools/get-operation-status.js
|
||||
import { z } from 'zod';
|
||||
import { createErrorResponse, createContentResponse } from './utils.js'; // Assuming these utils exist
|
||||
|
||||
/**
|
||||
* Register the get_operation_status tool.
|
||||
* @param {FastMCP} server - FastMCP server instance.
|
||||
* @param {AsyncOperationManager} asyncManager - The async operation manager.
|
||||
*/
|
||||
export function registerGetOperationStatusTool(server, asyncManager) {
|
||||
server.addTool({
|
||||
name: 'get_operation_status',
|
||||
description: 'Retrieves the status and result/error of a background operation.',
|
||||
parameters: z.object({
|
||||
operationId: z.string().describe('The ID of the operation to check.'),
|
||||
}),
|
||||
execute: async (args, { log }) => {
|
||||
try {
|
||||
const { operationId } = args;
|
||||
log.info(`Checking status for operation ID: ${operationId}`);
|
||||
|
||||
const status = asyncManager.getStatus(operationId);
|
||||
|
||||
// Status will now always return an object, but it might have status='not_found'
|
||||
if (status.status === 'not_found') {
|
||||
log.warn(`Operation ID not found: ${operationId}`);
|
||||
return createErrorResponse(
|
||||
status.error?.message || `Operation ID not found: ${operationId}`,
|
||||
status.error?.code || 'OPERATION_NOT_FOUND'
|
||||
);
|
||||
}
|
||||
|
||||
log.info(`Status for ${operationId}: ${status.status}`);
|
||||
return createContentResponse(status);
|
||||
|
||||
} catch (error) {
|
||||
log.error(`Error in get_operation_status tool: ${error.message}`, { stack: error.stack });
|
||||
return createErrorResponse(`Failed to get operation status: ${error.message}`, 'GET_STATUS_ERROR');
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
92
mcp-server/src/tools/get-task.js
Normal file
92
mcp-server/src/tools/get-task.js
Normal file
@@ -0,0 +1,92 @@
|
||||
/**
|
||||
* tools/get-task.js
|
||||
* Tool to get task details by ID
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { showTaskDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Custom processor function that removes allTasks from the response
|
||||
* @param {Object} data - The data returned from showTaskDirect
|
||||
* @returns {Object} - The processed data with allTasks removed
|
||||
*/
|
||||
function processTaskResponse(data) {
|
||||
if (!data) return data;
|
||||
|
||||
// If we have the expected structure with task and allTasks
|
||||
if (data.task) {
|
||||
// Return only the task object, removing the allTasks array
|
||||
return data.task;
|
||||
}
|
||||
|
||||
// If structure is unexpected, return as is
|
||||
return data;
|
||||
}
|
||||
|
||||
/**
|
||||
* Register the get-task tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerShowTaskTool(server) {
|
||||
server.addTool({
|
||||
name: "get_task",
|
||||
description: "Get detailed information about a specific task",
|
||||
parameters: z.object({
|
||||
id: z.string().describe("Task ID to get"),
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
// Log the session right at the start of execute
|
||||
log.info(`Session object received in execute: ${JSON.stringify(session)}`); // Use JSON.stringify for better visibility
|
||||
|
||||
try {
|
||||
log.info(`Getting task details for ID: ${args.id}`);
|
||||
|
||||
log.info(`Session object received in execute: ${JSON.stringify(session)}`); // Use JSON.stringify for better visibility
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
} else if (!rootFolder) {
|
||||
// Ensure we always have *some* root, even if session failed and args didn't provide one
|
||||
rootFolder = process.cwd();
|
||||
log.warn(`Session and args failed to provide root, using CWD: ${rootFolder}`);
|
||||
}
|
||||
|
||||
log.info(`Attempting to use project root: ${rootFolder}`); // Log the final resolved root
|
||||
|
||||
log.info(`Root folder: ${rootFolder}`); // Log the final resolved root
|
||||
const result = await showTaskDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log);
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully retrieved task details for ID: ${args.id}${result.fromCache ? ' (from cache)' : ''}`);
|
||||
} else {
|
||||
log.error(`Failed to get task: ${result.error.message}`);
|
||||
}
|
||||
|
||||
// Use our custom processor function to remove allTasks from the response
|
||||
return handleApiResult(result, log, 'Error retrieving task details', processTaskResponse);
|
||||
} catch (error) {
|
||||
log.error(`Error in get-task tool: ${error.message}\n${error.stack}`); // Add stack trace
|
||||
return createErrorResponse(`Failed to get task: ${error.message}`);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
65
mcp-server/src/tools/get-tasks.js
Normal file
65
mcp-server/src/tools/get-tasks.js
Normal file
@@ -0,0 +1,65 @@
|
||||
/**
|
||||
* tools/get-tasks.js
|
||||
* Tool to get all tasks from Task Master
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
createErrorResponse,
|
||||
handleApiResult,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { listTasksDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the getTasks tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerListTasksTool(server) {
|
||||
server.addTool({
|
||||
name: "get_tasks",
|
||||
description: "Get all tasks from Task Master, optionally filtering by status and including subtasks.",
|
||||
parameters: z.object({
|
||||
status: z.string().optional().describe("Filter tasks by status (e.g., 'pending', 'done')"),
|
||||
withSubtasks: z
|
||||
.boolean()
|
||||
.optional()
|
||||
.describe("Include subtasks nested within their parent tasks in the response"),
|
||||
file: z.string().optional().describe("Path to the tasks file (relative to project root or absolute)"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
"Root directory of the project (default: automatically detected from session or CWD)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Getting tasks with filters: ${JSON.stringify(args)}`);
|
||||
// await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await listTasksDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
log.info(`Retrieved ${result.success ? (result.data?.tasks?.length || 0) : 0} tasks${result.fromCache ? ' (from cache)' : ''}`);
|
||||
return handleApiResult(result, log, 'Error getting tasks');
|
||||
} catch (error) {
|
||||
log.error(`Error getting tasks: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// We no longer need the formatTasksResponse function as we're returning raw JSON data
|
||||
@@ -3,25 +3,71 @@
|
||||
* Export all Task Master CLI tools for MCP server
|
||||
*/
|
||||
|
||||
import { registerListTasksTool } from "./get-tasks.js";
|
||||
import logger from "../logger.js";
|
||||
import { registerListTasksTool } from "./listTasks.js";
|
||||
import { registerShowTaskTool } from "./showTask.js";
|
||||
import { registerSetTaskStatusTool } from "./setTaskStatus.js";
|
||||
import { registerExpandTaskTool } from "./expandTask.js";
|
||||
import { registerNextTaskTool } from "./nextTask.js";
|
||||
import { registerAddTaskTool } from "./addTask.js";
|
||||
import { registerSetTaskStatusTool } from "./set-task-status.js";
|
||||
import { registerParsePRDTool } from "./parse-prd.js";
|
||||
import { registerUpdateTool } from "./update.js";
|
||||
import { registerUpdateTaskTool } from "./update-task.js";
|
||||
import { registerUpdateSubtaskTool } from "./update-subtask.js";
|
||||
import { registerGenerateTool } from "./generate.js";
|
||||
import { registerShowTaskTool } from "./get-task.js";
|
||||
import { registerNextTaskTool } from "./next-task.js";
|
||||
import { registerExpandTaskTool } from "./expand-task.js";
|
||||
import { registerAddTaskTool } from "./add-task.js";
|
||||
import { registerAddSubtaskTool } from "./add-subtask.js";
|
||||
import { registerRemoveSubtaskTool } from "./remove-subtask.js";
|
||||
import { registerAnalyzeTool } from "./analyze.js";
|
||||
import { registerClearSubtasksTool } from "./clear-subtasks.js";
|
||||
import { registerExpandAllTool } from "./expand-all.js";
|
||||
import { registerRemoveDependencyTool } from "./remove-dependency.js";
|
||||
import { registerValidateDependenciesTool } from "./validate-dependencies.js";
|
||||
import { registerFixDependenciesTool } from "./fix-dependencies.js";
|
||||
import { registerComplexityReportTool } from "./complexity-report.js";
|
||||
import { registerAddDependencyTool } from "./add-dependency.js";
|
||||
import { registerRemoveTaskTool } from './remove-task.js';
|
||||
import { registerInitializeProjectTool } from './initialize-project.js';
|
||||
import { asyncOperationManager } from '../core/utils/async-manager.js';
|
||||
import { registerGetOperationStatusTool } from './get-operation-status.js';
|
||||
|
||||
/**
|
||||
* Register all Task Master tools with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
* @param {asyncOperationManager} asyncManager - The async operation manager instance
|
||||
*/
|
||||
export function registerTaskMasterTools(server) {
|
||||
export function registerTaskMasterTools(server, asyncManager) {
|
||||
try {
|
||||
// Register each tool
|
||||
registerListTasksTool(server);
|
||||
registerShowTaskTool(server);
|
||||
registerSetTaskStatusTool(server);
|
||||
registerExpandTaskTool(server);
|
||||
registerParsePRDTool(server);
|
||||
registerUpdateTool(server);
|
||||
registerUpdateTaskTool(server);
|
||||
registerUpdateSubtaskTool(server);
|
||||
registerGenerateTool(server);
|
||||
registerShowTaskTool(server);
|
||||
registerNextTaskTool(server);
|
||||
registerAddTaskTool(server);
|
||||
registerExpandTaskTool(server);
|
||||
registerAddTaskTool(server, asyncManager);
|
||||
registerAddSubtaskTool(server);
|
||||
registerRemoveSubtaskTool(server);
|
||||
registerAnalyzeTool(server);
|
||||
registerClearSubtasksTool(server);
|
||||
registerExpandAllTool(server);
|
||||
registerRemoveDependencyTool(server);
|
||||
registerValidateDependenciesTool(server);
|
||||
registerFixDependenciesTool(server);
|
||||
registerComplexityReportTool(server);
|
||||
registerAddDependencyTool(server);
|
||||
registerRemoveTaskTool(server);
|
||||
registerInitializeProjectTool(server);
|
||||
registerGetOperationStatusTool(server, asyncManager);
|
||||
} catch (error) {
|
||||
logger.error(`Error registering Task Master tools: ${error.message}`);
|
||||
throw error;
|
||||
}
|
||||
|
||||
logger.info('Registered Task Master MCP tools');
|
||||
}
|
||||
|
||||
export default {
|
||||
|
||||
62
mcp-server/src/tools/initialize-project.js
Normal file
62
mcp-server/src/tools/initialize-project.js
Normal file
@@ -0,0 +1,62 @@
|
||||
import { z } from "zod";
|
||||
import { execSync } from 'child_process';
|
||||
import { createContentResponse, createErrorResponse } from "./utils.js"; // Only need response creators
|
||||
|
||||
export function registerInitializeProjectTool(server) {
|
||||
server.addTool({
|
||||
name: "initialize_project", // snake_case for tool name
|
||||
description: "Initializes a new Task Master project structure in the current working directory by running 'task-master init'.",
|
||||
parameters: z.object({
|
||||
projectName: z.string().optional().describe("The name for the new project."),
|
||||
projectDescription: z.string().optional().describe("A brief description for the project."),
|
||||
projectVersion: z.string().optional().describe("The initial version for the project (e.g., '0.1.0')."),
|
||||
authorName: z.string().optional().describe("The author's name."),
|
||||
skipInstall: z.boolean().optional().default(false).describe("Skip installing dependencies automatically."),
|
||||
addAliases: z.boolean().optional().default(false).describe("Add shell aliases (tm, taskmaster) to shell config file."),
|
||||
yes: z.boolean().optional().default(false).describe("Skip prompts and use default values or provided arguments."),
|
||||
// projectRoot is not needed here as 'init' works on the current directory
|
||||
}),
|
||||
execute: async (args, { log }) => { // Destructure context to get log
|
||||
try {
|
||||
log.info(`Executing initialize_project with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Construct the command arguments carefully
|
||||
// Using npx ensures it uses the locally installed version if available, or fetches it
|
||||
let command = 'npx task-master init';
|
||||
const cliArgs = [];
|
||||
if (args.projectName) cliArgs.push(`--name "${args.projectName.replace(/"/g, '\\"')}"`); // Escape quotes
|
||||
if (args.projectDescription) cliArgs.push(`--description "${args.projectDescription.replace(/"/g, '\\"')}"`);
|
||||
if (args.projectVersion) cliArgs.push(`--version "${args.projectVersion.replace(/"/g, '\\"')}"`);
|
||||
if (args.authorName) cliArgs.push(`--author "${args.authorName.replace(/"/g, '\\"')}"`);
|
||||
if (args.skipInstall) cliArgs.push('--skip-install');
|
||||
if (args.addAliases) cliArgs.push('--aliases');
|
||||
if (args.yes) cliArgs.push('--yes');
|
||||
|
||||
command += ' ' + cliArgs.join(' ');
|
||||
|
||||
log.info(`Constructed command: ${command}`);
|
||||
|
||||
// Execute the command in the current working directory of the server process
|
||||
// Capture stdout/stderr. Use a reasonable timeout (e.g., 5 minutes)
|
||||
const output = execSync(command, { encoding: 'utf8', stdio: 'pipe', timeout: 300000 });
|
||||
|
||||
log.info(`Initialization output:\n${output}`);
|
||||
|
||||
// Return a standard success response manually
|
||||
return createContentResponse(
|
||||
"Project initialized successfully.",
|
||||
{ output: output } // Include output in the data payload
|
||||
);
|
||||
|
||||
} catch (error) {
|
||||
// Catch errors from execSync or timeouts
|
||||
const errorMessage = `Project initialization failed: ${error.message}`;
|
||||
const errorDetails = error.stderr?.toString() || error.stdout?.toString() || error.message; // Provide stderr/stdout if available
|
||||
log.error(`${errorMessage}\nDetails: ${errorDetails}`);
|
||||
|
||||
// Return a standard error response manually
|
||||
return createErrorResponse(errorMessage, { details: errorDetails });
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -1,53 +0,0 @@
|
||||
/**
|
||||
* tools/listTasks.js
|
||||
* Tool to list all tasks from Task Master
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
createErrorResponse,
|
||||
handleApiResult
|
||||
} from "./utils.js";
|
||||
import { listTasksDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the listTasks tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerListTasksTool(server) {
|
||||
server.addTool({
|
||||
name: "listTasks",
|
||||
description: "List all tasks from Task Master",
|
||||
parameters: z.object({
|
||||
status: z.string().optional().describe("Filter tasks by status"),
|
||||
withSubtasks: z
|
||||
.boolean()
|
||||
.optional()
|
||||
.describe("Include subtasks in the response"),
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log }) => {
|
||||
try {
|
||||
log.info(`Listing tasks with filters: ${JSON.stringify(args)}`);
|
||||
|
||||
// Call core function - args contains projectRoot which is handled internally
|
||||
const result = await listTasksDirect(args, log);
|
||||
|
||||
// Log result and use handleApiResult utility
|
||||
log.info(`Retrieved ${result.success ? (result.data?.tasks?.length || 0) : 0} tasks`);
|
||||
return handleApiResult(result, log, 'Error listing tasks');
|
||||
} catch (error) {
|
||||
log.error(`Error listing tasks: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// We no longer need the formatTasksResponse function as we're returning raw JSON data
|
||||
63
mcp-server/src/tools/next-task.js
Normal file
63
mcp-server/src/tools/next-task.js
Normal file
@@ -0,0 +1,63 @@
|
||||
/**
|
||||
* tools/next-task.js
|
||||
* Tool to find the next task to work on
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { nextTaskDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the next-task tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerNextTaskTool(server) {
|
||||
server.addTool({
|
||||
name: "next_task",
|
||||
description: "Find the next task to work on based on dependencies and status",
|
||||
parameters: z.object({
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Finding next task with args: ${JSON.stringify(args)}`);
|
||||
// await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await nextTaskDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully found next task: ${result.data?.task?.id || 'No available tasks'}`);
|
||||
} else {
|
||||
log.error(`Failed to find next task: ${result.error?.message || 'Unknown error'}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error finding next task');
|
||||
} catch (error) {
|
||||
log.error(`Error in nextTask tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
/**
|
||||
* tools/nextTask.js
|
||||
* Tool to show the next task to work on based on dependencies and status
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
executeTaskMasterCommand,
|
||||
createContentResponse,
|
||||
createErrorResponse,
|
||||
} from "./utils.js";
|
||||
|
||||
/**
|
||||
* Register the nextTask tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerNextTaskTool(server) {
|
||||
server.addTool({
|
||||
name: "nextTask",
|
||||
description:
|
||||
"Show the next task to work on based on dependencies and status",
|
||||
parameters: z.object({
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log }) => {
|
||||
try {
|
||||
log.info(`Finding next task to work on`);
|
||||
|
||||
const cmdArgs = [];
|
||||
if (args.file) cmdArgs.push(`--file=${args.file}`);
|
||||
|
||||
const projectRoot = args.projectRoot;
|
||||
|
||||
const result = executeTaskMasterCommand(
|
||||
"next",
|
||||
log,
|
||||
cmdArgs,
|
||||
projectRoot
|
||||
);
|
||||
|
||||
if (!result.success) {
|
||||
throw new Error(result.error);
|
||||
}
|
||||
|
||||
return createContentResponse(result.stdout);
|
||||
} catch (error) {
|
||||
log.error(`Error finding next task: ${error.message}`);
|
||||
return createErrorResponse(`Error finding next task: ${error.message}`);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
65
mcp-server/src/tools/parse-prd.js
Normal file
65
mcp-server/src/tools/parse-prd.js
Normal file
@@ -0,0 +1,65 @@
|
||||
/**
|
||||
* tools/parsePRD.js
|
||||
* Tool to parse PRD document and generate tasks
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { parsePRDDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the parsePRD tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerParsePRDTool(server) {
|
||||
server.addTool({
|
||||
name: "parse_prd",
|
||||
description: "Parse a Product Requirements Document (PRD) or text file to automatically generate initial tasks.",
|
||||
parameters: z.object({
|
||||
input: z.string().default("tasks/tasks.json").describe("Path to the PRD document file (relative to project root or absolute)"),
|
||||
numTasks: z.string().optional().describe("Approximate number of top-level tasks to generate (default: 10)"),
|
||||
output: z.string().optional().describe("Output path for tasks.json file (relative to project root or absolute, default: tasks/tasks.json)"),
|
||||
force: z.boolean().optional().describe("Allow overwriting an existing tasks.json file."),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
"Root directory of the project (default: automatically detected from session or CWD)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Parsing PRD with args: ${JSON.stringify(args)}`);
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await parsePRDDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully parsed PRD: ${result.data.message}`);
|
||||
} else {
|
||||
log.error(`Failed to parse PRD: ${result.error?.message || 'Unknown error'}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error parsing PRD');
|
||||
} catch (error) {
|
||||
log.error(`Error in parse-prd tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
60
mcp-server/src/tools/remove-dependency.js
Normal file
60
mcp-server/src/tools/remove-dependency.js
Normal file
@@ -0,0 +1,60 @@
|
||||
/**
|
||||
* tools/remove-dependency.js
|
||||
* Tool for removing a dependency from a task
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { removeDependencyDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the removeDependency tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerRemoveDependencyTool(server) {
|
||||
server.addTool({
|
||||
name: "remove_dependency",
|
||||
description: "Remove a dependency from a task",
|
||||
parameters: z.object({
|
||||
id: z.string().describe("Task ID to remove dependency from"),
|
||||
dependsOn: z.string().describe("Task ID to remove as a dependency"),
|
||||
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
|
||||
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Removing dependency for task ${args.id} from ${args.dependsOn} with args: ${JSON.stringify(args)}`);
|
||||
// await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await removeDependencyDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully removed dependency: ${result.data.message}`);
|
||||
} else {
|
||||
log.error(`Failed to remove dependency: ${result.error.message}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error removing dependency');
|
||||
} catch (error) {
|
||||
log.error(`Error in removeDependency tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
61
mcp-server/src/tools/remove-subtask.js
Normal file
61
mcp-server/src/tools/remove-subtask.js
Normal file
@@ -0,0 +1,61 @@
|
||||
/**
|
||||
* tools/remove-subtask.js
|
||||
* Tool for removing subtasks from parent tasks
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { removeSubtaskDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the removeSubtask tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerRemoveSubtaskTool(server) {
|
||||
server.addTool({
|
||||
name: "remove_subtask",
|
||||
description: "Remove a subtask from its parent task",
|
||||
parameters: z.object({
|
||||
id: z.string().describe("Subtask ID to remove in format 'parentId.subtaskId' (required)"),
|
||||
convert: z.boolean().optional().describe("Convert the subtask to a standalone task instead of deleting it"),
|
||||
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
|
||||
skipGenerate: z.boolean().optional().describe("Skip regenerating task files"),
|
||||
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Removing subtask with args: ${JSON.stringify(args)}`);
|
||||
// await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await removeSubtaskDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Subtask removed successfully: ${result.data.message}`);
|
||||
} else {
|
||||
log.error(`Failed to remove subtask: ${result.error.message}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error removing subtask');
|
||||
} catch (error) {
|
||||
log.error(`Error in removeSubtask tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
71
mcp-server/src/tools/remove-task.js
Normal file
71
mcp-server/src/tools/remove-task.js
Normal file
@@ -0,0 +1,71 @@
|
||||
/**
|
||||
* tools/remove-task.js
|
||||
* Tool to remove a task by ID
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { removeTaskDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the remove-task tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerRemoveTaskTool(server) {
|
||||
server.addTool({
|
||||
name: "remove_task",
|
||||
description: "Remove a task or subtask permanently from the tasks list",
|
||||
parameters: z.object({
|
||||
id: z.string().describe("ID of the task or subtask to remove (e.g., '5' or '5.2')"),
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
confirm: z.boolean().optional().describe("Whether to skip confirmation prompt (default: false)")
|
||||
}),
|
||||
execute: async (args, { log, session }) => {
|
||||
try {
|
||||
log.info(`Removing task with ID: ${args.id}`);
|
||||
|
||||
// Get project root from session
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
} else if (!rootFolder) {
|
||||
// Ensure we have a default if nothing else works
|
||||
rootFolder = process.cwd();
|
||||
log.warn(`Session and args failed to provide root, using CWD: ${rootFolder}`);
|
||||
}
|
||||
|
||||
log.info(`Using project root: ${rootFolder}`);
|
||||
|
||||
// Assume client has already handled confirmation if needed
|
||||
const result = await removeTaskDirect({
|
||||
id: args.id,
|
||||
file: args.file,
|
||||
projectRoot: rootFolder
|
||||
}, log);
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully removed task: ${args.id}`);
|
||||
} else {
|
||||
log.error(`Failed to remove task: ${result.error.message}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error removing task');
|
||||
} catch (error) {
|
||||
log.error(`Error in remove-task tool: ${error.message}`);
|
||||
return createErrorResponse(`Failed to remove task: ${error.message}`);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
69
mcp-server/src/tools/set-task-status.js
Normal file
69
mcp-server/src/tools/set-task-status.js
Normal file
@@ -0,0 +1,69 @@
|
||||
/**
|
||||
* tools/setTaskStatus.js
|
||||
* Tool to set the status of a task
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { setTaskStatusDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the setTaskStatus tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerSetTaskStatusTool(server) {
|
||||
server.addTool({
|
||||
name: "set_task_status",
|
||||
description: "Set the status of one or more tasks or subtasks.",
|
||||
parameters: z.object({
|
||||
id: z
|
||||
.string()
|
||||
.describe("Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated for multiple updates."),
|
||||
status: z
|
||||
.string()
|
||||
.describe("New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'."),
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
"Root directory of the project (default: automatically detected)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Setting status of task(s) ${args.id} to: ${args.status}`);
|
||||
// await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await setTaskStatusDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully updated status for task(s) ${args.id} to "${args.status}": ${result.data.message}`);
|
||||
} else {
|
||||
log.error(`Failed to update task status: ${result.error?.message || 'Unknown error'}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error setting task status');
|
||||
} catch (error) {
|
||||
log.error(`Error in setTaskStatus tool: ${error.message}`);
|
||||
return createErrorResponse(`Error setting task status: ${error.message}`);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
@@ -1,64 +0,0 @@
|
||||
/**
|
||||
* tools/setTaskStatus.js
|
||||
* Tool to set the status of a task
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
executeTaskMasterCommand,
|
||||
createContentResponse,
|
||||
createErrorResponse,
|
||||
} from "./utils.js";
|
||||
|
||||
/**
|
||||
* Register the setTaskStatus tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerSetTaskStatusTool(server) {
|
||||
server.addTool({
|
||||
name: "setTaskStatus",
|
||||
description: "Set the status of a task",
|
||||
parameters: z.object({
|
||||
id: z
|
||||
.string()
|
||||
.describe("Task ID (can be comma-separated for multiple tasks)"),
|
||||
status: z
|
||||
.string()
|
||||
.describe("New status (todo, in-progress, review, done)"),
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log }) => {
|
||||
try {
|
||||
log.info(`Setting status of task(s) ${args.id} to: ${args.status}`);
|
||||
|
||||
const cmdArgs = [`--id=${args.id}`, `--status=${args.status}`];
|
||||
if (args.file) cmdArgs.push(`--file=${args.file}`);
|
||||
|
||||
const projectRoot = args.projectRoot;
|
||||
|
||||
const result = executeTaskMasterCommand(
|
||||
"set-status",
|
||||
log,
|
||||
cmdArgs,
|
||||
projectRoot
|
||||
);
|
||||
|
||||
if (!result.success) {
|
||||
throw new Error(result.error);
|
||||
}
|
||||
|
||||
return createContentResponse(result.stdout);
|
||||
} catch (error) {
|
||||
log.error(`Error setting task status: ${error.message}`);
|
||||
return createErrorResponse(
|
||||
`Error setting task status: ${error.message}`
|
||||
);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
/**
|
||||
* tools/showTask.js
|
||||
* Tool to show detailed information about a specific task
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
executeTaskMasterCommand,
|
||||
createErrorResponse,
|
||||
handleApiResult
|
||||
} from "./utils.js";
|
||||
|
||||
/**
|
||||
* Register the showTask tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerShowTaskTool(server) {
|
||||
server.addTool({
|
||||
name: "showTask",
|
||||
description: "Show detailed information about a specific task",
|
||||
parameters: z.object({
|
||||
id: z.string().describe("Task ID to show"),
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log }) => {
|
||||
try {
|
||||
log.info(`Showing task details for ID: ${args.id}`);
|
||||
|
||||
// Prepare arguments for CLI command
|
||||
const cmdArgs = [`--id=${args.id}`];
|
||||
if (args.file) cmdArgs.push(`--file=${args.file}`);
|
||||
|
||||
// Execute the command - function now handles project root internally
|
||||
const result = executeTaskMasterCommand(
|
||||
"show",
|
||||
log,
|
||||
cmdArgs,
|
||||
args.projectRoot // Pass raw project root, function will normalize it
|
||||
);
|
||||
|
||||
// Process CLI result into API result format for handleApiResult
|
||||
if (result.success) {
|
||||
try {
|
||||
// Try to parse response as JSON
|
||||
const data = JSON.parse(result.stdout);
|
||||
// Return equivalent of a successful API call with data
|
||||
return handleApiResult({ success: true, data }, log, 'Error showing task');
|
||||
} catch (e) {
|
||||
// If parsing fails, still return success but with raw string data
|
||||
return handleApiResult(
|
||||
{ success: true, data: result.stdout },
|
||||
log,
|
||||
'Error showing task',
|
||||
// Skip data processing for string data
|
||||
null
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// Return equivalent of a failed API call
|
||||
return handleApiResult(
|
||||
{ success: false, error: { message: result.error } },
|
||||
log,
|
||||
'Error showing task'
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
log.error(`Error showing task: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
66
mcp-server/src/tools/update-subtask.js
Normal file
66
mcp-server/src/tools/update-subtask.js
Normal file
@@ -0,0 +1,66 @@
|
||||
/**
|
||||
* tools/update-subtask.js
|
||||
* Tool to append additional information to a specific subtask
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { updateSubtaskByIdDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the update-subtask tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerUpdateSubtaskTool(server) {
|
||||
server.addTool({
|
||||
name: "update_subtask",
|
||||
description: "Appends additional information to a specific subtask without replacing existing content",
|
||||
parameters: z.object({
|
||||
id: z.string().describe("ID of the subtask to update in format \"parentId.subtaskId\" (e.g., \"5.2\")"),
|
||||
prompt: z.string().describe("Information to add to the subtask"),
|
||||
research: z.boolean().optional().describe("Use Perplexity AI for research-backed updates"),
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Updating subtask with args: ${JSON.stringify(args)}`);
|
||||
// await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await updateSubtaskByIdDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully updated subtask with ID ${args.id}`);
|
||||
} else {
|
||||
log.error(`Failed to update subtask: ${result.error?.message || 'Unknown error'}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error updating subtask');
|
||||
} catch (error) {
|
||||
log.error(`Error in update_subtask tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
66
mcp-server/src/tools/update-task.js
Normal file
66
mcp-server/src/tools/update-task.js
Normal file
@@ -0,0 +1,66 @@
|
||||
/**
|
||||
* tools/update-task.js
|
||||
* Tool to update a single task by ID with new information
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { updateTaskByIdDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the update-task tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerUpdateTaskTool(server) {
|
||||
server.addTool({
|
||||
name: "update_task",
|
||||
description: "Updates a single task by ID with new information or context provided in the prompt.",
|
||||
parameters: z.object({
|
||||
id: z.union([z.number(), z.string()]).describe("ID of the task or subtask (e.g., '15', '15.2') to update"),
|
||||
prompt: z.string().describe("New information or context to incorporate into the task"),
|
||||
research: z.boolean().optional().describe("Use Perplexity AI for research-backed updates"),
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Updating task with args: ${JSON.stringify(args)}`);
|
||||
// await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await updateTaskByIdDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully updated task with ID ${args.id}`);
|
||||
} else {
|
||||
log.error(`Failed to update task: ${result.error?.message || 'Unknown error'}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error updating task');
|
||||
} catch (error) {
|
||||
log.error(`Error in update_task tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
66
mcp-server/src/tools/update.js
Normal file
66
mcp-server/src/tools/update.js
Normal file
@@ -0,0 +1,66 @@
|
||||
/**
|
||||
* tools/update.js
|
||||
* Tool to update tasks based on new context/prompt
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { updateTasksDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the update tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerUpdateTool(server) {
|
||||
server.addTool({
|
||||
name: "update",
|
||||
description: "Update multiple upcoming tasks (with ID >= 'from' ID) based on new context or changes provided in the prompt.",
|
||||
parameters: z.object({
|
||||
from: z.union([z.number(), z.string()]).describe("Task ID from which to start updating (inclusive)"),
|
||||
prompt: z.string().describe("Explanation of changes or new context to apply"),
|
||||
research: z.boolean().optional().describe("Use Perplexity AI for research-backed updates"),
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
"Root directory of the project (default: current working directory)"
|
||||
),
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Updating tasks with args: ${JSON.stringify(args)}`);
|
||||
// await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await updateTasksDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log/*, { reportProgress, mcpLog: log, session}*/);
|
||||
|
||||
// await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully updated tasks from ID ${args.from}: ${result.data.message}`);
|
||||
} else {
|
||||
log.error(`Failed to update tasks: ${result.error?.message || 'Unknown error'}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error updating tasks');
|
||||
} catch (error) {
|
||||
log.error(`Error in update tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
@@ -5,26 +5,138 @@
|
||||
|
||||
import { spawnSync } from "child_process";
|
||||
import path from "path";
|
||||
import fs from 'fs';
|
||||
import { contextManager } from '../core/context-manager.js'; // Import the singleton
|
||||
|
||||
// Import path utilities to ensure consistent path resolution
|
||||
import { lastFoundProjectRoot, PROJECT_MARKERS } from '../core/utils/path-utils.js';
|
||||
|
||||
/**
|
||||
* Get normalized project root path
|
||||
* @param {string|undefined} projectRootRaw - Raw project root from arguments
|
||||
* @param {Object} log - Logger object
|
||||
* @returns {string} - Normalized absolute path to project root
|
||||
*/
|
||||
export function getProjectRoot(projectRootRaw, log) {
|
||||
// Make sure projectRoot is set
|
||||
const rootPath = projectRootRaw || process.cwd();
|
||||
function getProjectRoot(projectRootRaw, log) {
|
||||
// PRECEDENCE ORDER:
|
||||
// 1. Environment variable override
|
||||
// 2. Explicitly provided projectRoot in args
|
||||
// 3. Previously found/cached project root
|
||||
// 4. Current directory if it has project markers
|
||||
// 5. Current directory with warning
|
||||
|
||||
// Ensure projectRoot is absolute
|
||||
const projectRoot = path.isAbsolute(rootPath)
|
||||
? rootPath
|
||||
: path.resolve(process.cwd(), rootPath);
|
||||
// 1. Check for environment variable override
|
||||
if (process.env.TASK_MASTER_PROJECT_ROOT) {
|
||||
const envRoot = process.env.TASK_MASTER_PROJECT_ROOT;
|
||||
const absolutePath = path.isAbsolute(envRoot)
|
||||
? envRoot
|
||||
: path.resolve(process.cwd(), envRoot);
|
||||
log.info(`Using project root from TASK_MASTER_PROJECT_ROOT environment variable: ${absolutePath}`);
|
||||
return absolutePath;
|
||||
}
|
||||
|
||||
log.info(`Using project root: ${projectRoot}`);
|
||||
// 2. If project root is explicitly provided, use it
|
||||
if (projectRootRaw) {
|
||||
const absolutePath = path.isAbsolute(projectRootRaw)
|
||||
? projectRootRaw
|
||||
: path.resolve(process.cwd(), projectRootRaw);
|
||||
|
||||
log.info(`Using explicitly provided project root: ${absolutePath}`);
|
||||
return absolutePath;
|
||||
}
|
||||
|
||||
// 3. If we have a last found project root from a tasks.json search, use that for consistency
|
||||
if (lastFoundProjectRoot) {
|
||||
log.info(`Using last known project root where tasks.json was found: ${lastFoundProjectRoot}`);
|
||||
return lastFoundProjectRoot;
|
||||
}
|
||||
|
||||
// 4. Check if the current directory has any indicators of being a task-master project
|
||||
const currentDir = process.cwd();
|
||||
if (PROJECT_MARKERS.some(marker => {
|
||||
const markerPath = path.join(currentDir, marker);
|
||||
return fs.existsSync(markerPath);
|
||||
})) {
|
||||
log.info(`Using current directory as project root (found project markers): ${currentDir}`);
|
||||
return currentDir;
|
||||
}
|
||||
|
||||
// 5. Default to current working directory but warn the user
|
||||
log.warn(`No task-master project detected in current directory. Using ${currentDir} as project root.`);
|
||||
log.warn('Consider using --project-root to specify the correct project location or set TASK_MASTER_PROJECT_ROOT environment variable.');
|
||||
return currentDir;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts the project root path from the FastMCP session object.
|
||||
* @param {Object} session - The FastMCP session object.
|
||||
* @param {Object} log - Logger object.
|
||||
* @returns {string|null} - The absolute path to the project root, or null if not found.
|
||||
*/
|
||||
function getProjectRootFromSession(session, log) {
|
||||
try {
|
||||
// If we have a session with roots array
|
||||
if (session?.roots?.[0]?.uri) {
|
||||
const rootUri = session.roots[0].uri;
|
||||
const rootPath = rootUri.startsWith('file://')
|
||||
? decodeURIComponent(rootUri.slice(7))
|
||||
: rootUri;
|
||||
return rootPath;
|
||||
}
|
||||
|
||||
// If we have a session with roots.roots array (different structure)
|
||||
if (session?.roots?.roots?.[0]?.uri) {
|
||||
const rootUri = session.roots.roots[0].uri;
|
||||
const rootPath = rootUri.startsWith('file://')
|
||||
? decodeURIComponent(rootUri.slice(7))
|
||||
: rootUri;
|
||||
return rootPath;
|
||||
}
|
||||
|
||||
// Get the server's location and try to find project root -- this is a fallback necessary in Cursor IDE
|
||||
const serverPath = process.argv[1]; // This should be the path to server.js, which is in mcp-server/
|
||||
if (serverPath && serverPath.includes('mcp-server')) {
|
||||
// Find the mcp-server directory first
|
||||
const mcpServerIndex = serverPath.indexOf('mcp-server');
|
||||
if (mcpServerIndex !== -1) {
|
||||
// Get the path up to mcp-server, which should be the project root
|
||||
const projectRoot = serverPath.substring(0, mcpServerIndex - 1); // -1 to remove trailing slash
|
||||
|
||||
// Verify this looks like our project root by checking for key files/directories
|
||||
if (fs.existsSync(path.join(projectRoot, '.cursor')) ||
|
||||
fs.existsSync(path.join(projectRoot, 'mcp-server')) ||
|
||||
fs.existsSync(path.join(projectRoot, 'package.json'))) {
|
||||
return projectRoot;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we get here, we'll try process.cwd() but only if it's not "/"
|
||||
const cwd = process.cwd();
|
||||
if (cwd !== '/') {
|
||||
return cwd;
|
||||
}
|
||||
|
||||
// Last resort: try to derive from the server path we found earlier
|
||||
if (serverPath) {
|
||||
const mcpServerIndex = serverPath.indexOf('mcp-server');
|
||||
return mcpServerIndex !== -1 ? serverPath.substring(0, mcpServerIndex - 1) : cwd;
|
||||
}
|
||||
|
||||
throw new Error('Could not determine project root');
|
||||
} catch (e) {
|
||||
// If we have a server path, use it as a basis for project root
|
||||
const serverPath = process.argv[1];
|
||||
if (serverPath && serverPath.includes('mcp-server')) {
|
||||
const mcpServerIndex = serverPath.indexOf('mcp-server');
|
||||
return mcpServerIndex !== -1 ? serverPath.substring(0, mcpServerIndex - 1) : process.cwd();
|
||||
}
|
||||
|
||||
// Only use cwd if it's not "/"
|
||||
const cwd = process.cwd();
|
||||
return cwd !== '/' ? cwd : '/';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle API result with standardized error handling and response formatting
|
||||
@@ -34,7 +146,7 @@ export function getProjectRoot(projectRootRaw, log) {
|
||||
* @param {Function} processFunction - Optional function to process successful result data
|
||||
* @returns {Object} - Standardized MCP response object
|
||||
*/
|
||||
export function handleApiResult(result, log, errorPrefix = 'API error', processFunction = processMCPResponseData) {
|
||||
function handleApiResult(result, log, errorPrefix = 'API error', processFunction = processMCPResponseData) {
|
||||
if (!result.success) {
|
||||
const errorMsg = result.error?.message || `Unknown ${errorPrefix}`;
|
||||
// Include cache status in error logs
|
||||
@@ -66,7 +178,7 @@ export function handleApiResult(result, log, errorPrefix = 'API error', processF
|
||||
* @param {string|undefined} projectRootRaw - Optional raw project root path (will be normalized internally)
|
||||
* @returns {Object} - The result of the command execution
|
||||
*/
|
||||
export function executeTaskMasterCommand(
|
||||
function executeTaskMasterCommand(
|
||||
command,
|
||||
log,
|
||||
args = [],
|
||||
@@ -143,7 +255,7 @@ export function executeTaskMasterCommand(
|
||||
* @returns {Promise<Object>} - An object containing the result, indicating if it was from cache.
|
||||
* Format: { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
|
||||
*/
|
||||
export async function getCachedOrExecute({ cacheKey, actionFn, log }) {
|
||||
async function getCachedOrExecute({ cacheKey, actionFn, log }) {
|
||||
// Check cache first
|
||||
const cachedResult = contextManager.getCachedData(cacheKey);
|
||||
|
||||
@@ -180,95 +292,6 @@ export async function getCachedOrExecute({ cacheKey, actionFn, log }) {
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a Task Master tool action with standardized error handling, logging, and response formatting.
|
||||
* Integrates caching logic via getCachedOrExecute if a cacheKeyGenerator is provided.
|
||||
*
|
||||
* @param {Object} options - Options for executing the tool action
|
||||
* @param {Function} options.actionFn - The core action function (e.g., listTasksDirect) to execute. Should return {success, data, error}.
|
||||
* @param {Object} options.args - Arguments for the action, passed to actionFn and cacheKeyGenerator.
|
||||
* @param {Object} options.log - Logger object from FastMCP.
|
||||
* @param {string} options.actionName - Name of the action for logging purposes.
|
||||
* @param {Function} [options.cacheKeyGenerator] - Optional function to generate a cache key based on args. If provided, caching is enabled.
|
||||
* @param {Function} [options.processResult=processMCPResponseData] - Optional function to process the result data before returning.
|
||||
* @returns {Promise<Object>} - Standardized response for FastMCP.
|
||||
*/
|
||||
export async function executeMCPToolAction({
|
||||
actionFn,
|
||||
args,
|
||||
log,
|
||||
actionName,
|
||||
cacheKeyGenerator, // Note: We decided not to use this for listTasks for now
|
||||
processResult = processMCPResponseData
|
||||
}) {
|
||||
try {
|
||||
// Log the action start
|
||||
log.info(`${actionName} with args: ${JSON.stringify(args)}`);
|
||||
|
||||
// Normalize project root path - common to almost all tools
|
||||
const projectRootRaw = args.projectRoot || process.cwd();
|
||||
const projectRoot = path.isAbsolute(projectRootRaw)
|
||||
? projectRootRaw
|
||||
: path.resolve(process.cwd(), projectRootRaw);
|
||||
|
||||
log.info(`Using project root: ${projectRoot}`);
|
||||
const executionArgs = { ...args, projectRoot };
|
||||
|
||||
let result;
|
||||
const cacheKey = cacheKeyGenerator ? cacheKeyGenerator(executionArgs) : null;
|
||||
|
||||
if (cacheKey) {
|
||||
// Use caching utility
|
||||
log.info(`Caching enabled for ${actionName} with key: ${cacheKey}`);
|
||||
const cacheWrappedAction = async () => await actionFn(executionArgs, log);
|
||||
result = await getCachedOrExecute({
|
||||
cacheKey,
|
||||
actionFn: cacheWrappedAction,
|
||||
log
|
||||
});
|
||||
} else {
|
||||
// Execute directly without caching
|
||||
log.info(`Caching disabled for ${actionName}. Executing directly.`);
|
||||
// We need to ensure the result from actionFn has a fromCache field
|
||||
// Let's assume actionFn now consistently returns { success, data/error, fromCache }
|
||||
// The current listTasksDirect does this if it calls getCachedOrExecute internally.
|
||||
result = await actionFn(executionArgs, log);
|
||||
// If the action function itself doesn't determine caching (like our original listTasksDirect refactor attempt),
|
||||
// we'd set it here:
|
||||
// result.fromCache = false;
|
||||
}
|
||||
|
||||
// Handle error case
|
||||
if (!result.success) {
|
||||
const errorMsg = result.error?.message || `Unknown error during ${actionName.toLowerCase()}`;
|
||||
// Include fromCache in error logs too, might be useful
|
||||
log.error(`Error during ${actionName.toLowerCase()}: ${errorMsg}. From cache: ${result.fromCache}`);
|
||||
return createErrorResponse(errorMsg);
|
||||
}
|
||||
|
||||
// Log success
|
||||
log.info(`Successfully completed ${actionName.toLowerCase()}. From cache: ${result.fromCache}`);
|
||||
|
||||
// Process the result data if needed
|
||||
const processedData = processResult ? processResult(result.data) : result.data;
|
||||
|
||||
// Create a new object that includes both the processed data and the fromCache flag
|
||||
const responsePayload = {
|
||||
fromCache: result.fromCache, // Include the flag here
|
||||
data: processedData // Embed the actual data under a 'data' key
|
||||
};
|
||||
|
||||
// Pass this combined payload to createContentResponse
|
||||
return createContentResponse(responsePayload);
|
||||
|
||||
} catch (error) {
|
||||
// Handle unexpected errors during the execution wrapper itself
|
||||
log.error(`Unexpected error during ${actionName.toLowerCase()} execution wrapper: ${error.message}`);
|
||||
console.error(error.stack); // Log stack for debugging wrapper errors
|
||||
return createErrorResponse(`Internal server error during ${actionName.toLowerCase()}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Recursively removes specified fields from task objects, whether single or in an array.
|
||||
* Handles common data structures returned by task commands.
|
||||
@@ -276,7 +299,7 @@ export async function executeMCPToolAction({
|
||||
* @param {string[]} fieldsToRemove - An array of field names to remove.
|
||||
* @returns {Object|Array} - The processed data with specified fields removed.
|
||||
*/
|
||||
export function processMCPResponseData(taskOrData, fieldsToRemove = ['details', 'testStrategy']) {
|
||||
function processMCPResponseData(taskOrData, fieldsToRemove = ['details', 'testStrategy']) {
|
||||
if (!taskOrData) {
|
||||
return taskOrData;
|
||||
}
|
||||
@@ -333,7 +356,7 @@ export function processMCPResponseData(taskOrData, fieldsToRemove = ['details',
|
||||
* @param {string|Object} content - Content to include in response
|
||||
* @returns {Object} - Content response object in FastMCP format
|
||||
*/
|
||||
export function createContentResponse(content) {
|
||||
function createContentResponse(content) {
|
||||
// FastMCP requires text type, so we format objects as JSON strings
|
||||
return {
|
||||
content: [
|
||||
@@ -365,3 +388,14 @@ export function createErrorResponse(errorMessage) {
|
||||
isError: true
|
||||
};
|
||||
}
|
||||
|
||||
// Ensure all functions are exported
|
||||
export {
|
||||
getProjectRoot,
|
||||
getProjectRootFromSession,
|
||||
handleApiResult,
|
||||
executeTaskMasterCommand,
|
||||
getCachedOrExecute,
|
||||
processMCPResponseData,
|
||||
createContentResponse,
|
||||
};
|
||||
|
||||
58
mcp-server/src/tools/validate-dependencies.js
Normal file
58
mcp-server/src/tools/validate-dependencies.js
Normal file
@@ -0,0 +1,58 @@
|
||||
/**
|
||||
* tools/validate-dependencies.js
|
||||
* Tool for validating task dependencies
|
||||
*/
|
||||
|
||||
import { z } from "zod";
|
||||
import {
|
||||
handleApiResult,
|
||||
createErrorResponse,
|
||||
getProjectRootFromSession
|
||||
} from "./utils.js";
|
||||
import { validateDependenciesDirect } from "../core/task-master-core.js";
|
||||
|
||||
/**
|
||||
* Register the validateDependencies tool with the MCP server
|
||||
* @param {Object} server - FastMCP server instance
|
||||
*/
|
||||
export function registerValidateDependenciesTool(server) {
|
||||
server.addTool({
|
||||
name: "validate_dependencies",
|
||||
description: "Check tasks for dependency issues (like circular references or links to non-existent tasks) without making changes.",
|
||||
parameters: z.object({
|
||||
file: z.string().optional().describe("Path to the tasks file"),
|
||||
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
|
||||
}),
|
||||
execute: async (args, { log, session, reportProgress }) => {
|
||||
try {
|
||||
log.info(`Validating dependencies with args: ${JSON.stringify(args)}`);
|
||||
await reportProgress({ progress: 0 });
|
||||
|
||||
let rootFolder = getProjectRootFromSession(session, log);
|
||||
|
||||
if (!rootFolder && args.projectRoot) {
|
||||
rootFolder = args.projectRoot;
|
||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||
}
|
||||
|
||||
const result = await validateDependenciesDirect({
|
||||
projectRoot: rootFolder,
|
||||
...args
|
||||
}, log, { reportProgress, mcpLog: log, session});
|
||||
|
||||
await reportProgress({ progress: 100 });
|
||||
|
||||
if (result.success) {
|
||||
log.info(`Successfully validated dependencies: ${result.data.message}`);
|
||||
} else {
|
||||
log.error(`Failed to validate dependencies: ${result.error.message}`);
|
||||
}
|
||||
|
||||
return handleApiResult(result, log, 'Error validating dependencies');
|
||||
} catch (error) {
|
||||
log.error(`Error in validateDependencies tool: ${error.message}`);
|
||||
return createErrorResponse(error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
71
mcp-test.js
Normal file
71
mcp-test.js
Normal file
@@ -0,0 +1,71 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
import { Config } from 'fastmcp';
|
||||
import path from 'path';
|
||||
import fs from 'fs';
|
||||
|
||||
// Log the current directory
|
||||
console.error(`Current working directory: ${process.cwd()}`);
|
||||
|
||||
try {
|
||||
console.error('Attempting to load FastMCP Config...');
|
||||
|
||||
// Check if .cursor/mcp.json exists
|
||||
const mcpPath = path.join(process.cwd(), '.cursor', 'mcp.json');
|
||||
console.error(`Checking if mcp.json exists at: ${mcpPath}`);
|
||||
|
||||
if (fs.existsSync(mcpPath)) {
|
||||
console.error('mcp.json file found');
|
||||
console.error(`File content: ${JSON.stringify(JSON.parse(fs.readFileSync(mcpPath, 'utf8')), null, 2)}`);
|
||||
} else {
|
||||
console.error('mcp.json file not found');
|
||||
}
|
||||
|
||||
// Try to create Config
|
||||
const config = new Config();
|
||||
console.error('Config created successfully');
|
||||
|
||||
// Check if env property exists
|
||||
if (config.env) {
|
||||
console.error(`Config.env exists with keys: ${Object.keys(config.env).join(', ')}`);
|
||||
|
||||
// Print each env var value (careful with sensitive values)
|
||||
for (const [key, value] of Object.entries(config.env)) {
|
||||
if (key.includes('KEY')) {
|
||||
console.error(`${key}: [value hidden]`);
|
||||
} else {
|
||||
console.error(`${key}: ${value}`);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
console.error('Config.env does not exist');
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(`Error loading Config: ${error.message}`);
|
||||
console.error(`Stack trace: ${error.stack}`);
|
||||
}
|
||||
|
||||
// Log process.env to see if values from mcp.json were loaded automatically
|
||||
console.error('\nChecking if process.env already has values from mcp.json:');
|
||||
const envVars = [
|
||||
'ANTHROPIC_API_KEY',
|
||||
'PERPLEXITY_API_KEY',
|
||||
'MODEL',
|
||||
'PERPLEXITY_MODEL',
|
||||
'MAX_TOKENS',
|
||||
'TEMPERATURE',
|
||||
'DEFAULT_SUBTASKS',
|
||||
'DEFAULT_PRIORITY'
|
||||
];
|
||||
|
||||
for (const varName of envVars) {
|
||||
if (process.env[varName]) {
|
||||
if (varName.includes('KEY')) {
|
||||
console.error(`${varName}: [value hidden]`);
|
||||
} else {
|
||||
console.error(`${varName}: ${process.env[varName]}`);
|
||||
}
|
||||
} else {
|
||||
console.error(`${varName}: not set`);
|
||||
}
|
||||
}
|
||||
470
package-lock.json
generated
470
package-lock.json
generated
@@ -1,13 +1,13 @@
|
||||
{
|
||||
"name": "task-master-ai",
|
||||
"version": "0.9.30",
|
||||
"version": "0.10.0",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "task-master-ai",
|
||||
"version": "0.9.30",
|
||||
"license": "MIT",
|
||||
"version": "0.10.0",
|
||||
"license": "MIT WITH Commons-Clause",
|
||||
"dependencies": {
|
||||
"@anthropic-ai/sdk": "^0.39.0",
|
||||
"boxen": "^8.0.1",
|
||||
@@ -22,15 +22,17 @@
|
||||
"fuse.js": "^7.0.0",
|
||||
"gradient-string": "^3.0.0",
|
||||
"helmet": "^8.1.0",
|
||||
"inquirer": "^12.5.0",
|
||||
"jsonwebtoken": "^9.0.2",
|
||||
"lru-cache": "^10.2.0",
|
||||
"openai": "^4.89.0",
|
||||
"ora": "^8.2.0"
|
||||
"ora": "^8.2.0",
|
||||
"uuid": "^11.1.0"
|
||||
},
|
||||
"bin": {
|
||||
"task-master": "bin/task-master.js",
|
||||
"task-master-init": "bin/task-master-init.js",
|
||||
"task-master-mcp-server": "mcp-server/server.js"
|
||||
"task-master-mcp": "mcp-server/server.js"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@changesets/changelog-github": "^0.5.1",
|
||||
@@ -937,6 +939,365 @@
|
||||
"node": ">=0.1.90"
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/checkbox": {
|
||||
"version": "4.1.4",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/checkbox/-/checkbox-4.1.4.tgz",
|
||||
"integrity": "sha512-d30576EZdApjAMceijXA5jDzRQHT/MygbC+J8I7EqA6f/FRpYxlRtRJbHF8gHeWYeSdOuTEJqonn7QLB1ELezA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@inquirer/core": "^10.1.9",
|
||||
"@inquirer/figures": "^1.0.11",
|
||||
"@inquirer/type": "^3.0.5",
|
||||
"ansi-escapes": "^4.3.2",
|
||||
"yoctocolors-cjs": "^2.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/confirm": {
|
||||
"version": "5.1.8",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/confirm/-/confirm-5.1.8.tgz",
|
||||
"integrity": "sha512-dNLWCYZvXDjO3rnQfk2iuJNL4Ivwz/T2+C3+WnNfJKsNGSuOs3wAo2F6e0p946gtSAk31nZMfW+MRmYaplPKsg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@inquirer/core": "^10.1.9",
|
||||
"@inquirer/type": "^3.0.5"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/core": {
|
||||
"version": "10.1.9",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.1.9.tgz",
|
||||
"integrity": "sha512-sXhVB8n20NYkUBfDYgizGHlpRVaCRjtuzNZA6xpALIUbkgfd2Hjz+DfEN6+h1BRnuxw0/P4jCIMjMsEOAMwAJw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@inquirer/figures": "^1.0.11",
|
||||
"@inquirer/type": "^3.0.5",
|
||||
"ansi-escapes": "^4.3.2",
|
||||
"cli-width": "^4.1.0",
|
||||
"mute-stream": "^2.0.0",
|
||||
"signal-exit": "^4.1.0",
|
||||
"wrap-ansi": "^6.2.0",
|
||||
"yoctocolors-cjs": "^2.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/core/node_modules/ansi-regex": {
|
||||
"version": "5.0.1",
|
||||
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
|
||||
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/core/node_modules/emoji-regex": {
|
||||
"version": "8.0.0",
|
||||
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
|
||||
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@inquirer/core/node_modules/string-width": {
|
||||
"version": "4.2.3",
|
||||
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
|
||||
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"emoji-regex": "^8.0.0",
|
||||
"is-fullwidth-code-point": "^3.0.0",
|
||||
"strip-ansi": "^6.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/core/node_modules/strip-ansi": {
|
||||
"version": "6.0.1",
|
||||
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
|
||||
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"ansi-regex": "^5.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/core/node_modules/wrap-ansi": {
|
||||
"version": "6.2.0",
|
||||
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz",
|
||||
"integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"ansi-styles": "^4.0.0",
|
||||
"string-width": "^4.1.0",
|
||||
"strip-ansi": "^6.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/editor": {
|
||||
"version": "4.2.9",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/editor/-/editor-4.2.9.tgz",
|
||||
"integrity": "sha512-8HjOppAxO7O4wV1ETUlJFg6NDjp/W2NP5FB9ZPAcinAlNT4ZIWOLe2pUVwmmPRSV0NMdI5r/+lflN55AwZOKSw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@inquirer/core": "^10.1.9",
|
||||
"@inquirer/type": "^3.0.5",
|
||||
"external-editor": "^3.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/expand": {
|
||||
"version": "4.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/expand/-/expand-4.0.11.tgz",
|
||||
"integrity": "sha512-OZSUW4hFMW2TYvX/Sv+NnOZgO8CHT2TU1roUCUIF2T+wfw60XFRRp9MRUPCT06cRnKL+aemt2YmTWwt7rOrNEA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@inquirer/core": "^10.1.9",
|
||||
"@inquirer/type": "^3.0.5",
|
||||
"yoctocolors-cjs": "^2.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/figures": {
|
||||
"version": "1.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.11.tgz",
|
||||
"integrity": "sha512-eOg92lvrn/aRUqbxRyvpEWnrvRuTYRifixHkYVpJiygTgVSBIHDqLh0SrMQXkafvULg3ck11V7xvR+zcgvpHFw==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/input": {
|
||||
"version": "4.1.8",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/input/-/input-4.1.8.tgz",
|
||||
"integrity": "sha512-WXJI16oOZ3/LiENCAxe8joniNp8MQxF6Wi5V+EBbVA0ZIOpFcL4I9e7f7cXse0HJeIPCWO8Lcgnk98juItCi7Q==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@inquirer/core": "^10.1.9",
|
||||
"@inquirer/type": "^3.0.5"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/number": {
|
||||
"version": "3.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/number/-/number-3.0.11.tgz",
|
||||
"integrity": "sha512-pQK68CsKOgwvU2eA53AG/4npRTH2pvs/pZ2bFvzpBhrznh8Mcwt19c+nMO7LHRr3Vreu1KPhNBF3vQAKrjIulw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@inquirer/core": "^10.1.9",
|
||||
"@inquirer/type": "^3.0.5"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/password": {
|
||||
"version": "4.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/password/-/password-4.0.11.tgz",
|
||||
"integrity": "sha512-dH6zLdv+HEv1nBs96Case6eppkRggMe8LoOTl30+Gq5Wf27AO/vHFgStTVz4aoevLdNXqwE23++IXGw4eiOXTg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@inquirer/core": "^10.1.9",
|
||||
"@inquirer/type": "^3.0.5",
|
||||
"ansi-escapes": "^4.3.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/prompts": {
|
||||
"version": "7.4.0",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/prompts/-/prompts-7.4.0.tgz",
|
||||
"integrity": "sha512-EZiJidQOT4O5PYtqnu1JbF0clv36oW2CviR66c7ma4LsupmmQlUwmdReGKRp456OWPWMz3PdrPiYg3aCk3op2w==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@inquirer/checkbox": "^4.1.4",
|
||||
"@inquirer/confirm": "^5.1.8",
|
||||
"@inquirer/editor": "^4.2.9",
|
||||
"@inquirer/expand": "^4.0.11",
|
||||
"@inquirer/input": "^4.1.8",
|
||||
"@inquirer/number": "^3.0.11",
|
||||
"@inquirer/password": "^4.0.11",
|
||||
"@inquirer/rawlist": "^4.0.11",
|
||||
"@inquirer/search": "^3.0.11",
|
||||
"@inquirer/select": "^4.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/rawlist": {
|
||||
"version": "4.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/rawlist/-/rawlist-4.0.11.tgz",
|
||||
"integrity": "sha512-uAYtTx0IF/PqUAvsRrF3xvnxJV516wmR6YVONOmCWJbbt87HcDHLfL9wmBQFbNJRv5kCjdYKrZcavDkH3sVJPg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@inquirer/core": "^10.1.9",
|
||||
"@inquirer/type": "^3.0.5",
|
||||
"yoctocolors-cjs": "^2.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/search": {
|
||||
"version": "3.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/search/-/search-3.0.11.tgz",
|
||||
"integrity": "sha512-9CWQT0ikYcg6Ls3TOa7jljsD7PgjcsYEM0bYE+Gkz+uoW9u8eaJCRHJKkucpRE5+xKtaaDbrND+nPDoxzjYyew==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@inquirer/core": "^10.1.9",
|
||||
"@inquirer/figures": "^1.0.11",
|
||||
"@inquirer/type": "^3.0.5",
|
||||
"yoctocolors-cjs": "^2.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/select": {
|
||||
"version": "4.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/select/-/select-4.1.0.tgz",
|
||||
"integrity": "sha512-z0a2fmgTSRN+YBuiK1ROfJ2Nvrpij5lVN3gPDkQGhavdvIVGHGW29LwYZfM/j42Ai2hUghTI/uoBuTbrJk42bA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@inquirer/core": "^10.1.9",
|
||||
"@inquirer/figures": "^1.0.11",
|
||||
"@inquirer/type": "^3.0.5",
|
||||
"ansi-escapes": "^4.3.2",
|
||||
"yoctocolors-cjs": "^2.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@inquirer/type": {
|
||||
"version": "3.0.5",
|
||||
"resolved": "https://registry.npmjs.org/@inquirer/type/-/type-3.0.5.tgz",
|
||||
"integrity": "sha512-ZJpeIYYueOz/i/ONzrfof8g89kNdO2hjGuvULROo3O8rlB2CRtSseE5KeirnyE4t/thAn/EwvS/vuQeJCn+NZg==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@istanbuljs/load-nyc-config": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz",
|
||||
@@ -2098,7 +2459,6 @@
|
||||
"version": "4.3.2",
|
||||
"resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz",
|
||||
"integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"type-fest": "^0.21.3"
|
||||
@@ -2114,7 +2474,6 @@
|
||||
"version": "0.21.3",
|
||||
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz",
|
||||
"integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==",
|
||||
"dev": true,
|
||||
"license": "(MIT OR CC0-1.0)",
|
||||
"engines": {
|
||||
"node": ">=10"
|
||||
@@ -2618,7 +2977,6 @@
|
||||
"version": "0.7.0",
|
||||
"resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz",
|
||||
"integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/ci-info": {
|
||||
@@ -2739,6 +3097,15 @@
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/cli-width": {
|
||||
"version": "4.1.0",
|
||||
"resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz",
|
||||
"integrity": "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==",
|
||||
"license": "ISC",
|
||||
"engines": {
|
||||
"node": ">= 12"
|
||||
}
|
||||
},
|
||||
"node_modules/cliui": {
|
||||
"version": "8.0.1",
|
||||
"resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
|
||||
@@ -3545,7 +3912,6 @@
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz",
|
||||
"integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"chardet": "^0.7.0",
|
||||
@@ -4419,6 +4785,32 @@
|
||||
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/inquirer": {
|
||||
"version": "12.5.0",
|
||||
"resolved": "https://registry.npmjs.org/inquirer/-/inquirer-12.5.0.tgz",
|
||||
"integrity": "sha512-aiBBq5aKF1k87MTxXDylLfwpRwToShiHrSv4EmB07EYyLgmnjEz5B3rn0aGw1X3JA/64Ngf2T54oGwc+BCsPIQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@inquirer/core": "^10.1.9",
|
||||
"@inquirer/prompts": "^7.4.0",
|
||||
"@inquirer/type": "^3.0.5",
|
||||
"ansi-escapes": "^4.3.2",
|
||||
"mute-stream": "^2.0.0",
|
||||
"run-async": "^3.0.0",
|
||||
"rxjs": "^7.8.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@types/node": ">=18"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/node": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/ipaddr.js": {
|
||||
"version": "1.9.1",
|
||||
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
|
||||
@@ -5736,6 +6128,15 @@
|
||||
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/mute-stream": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-2.0.0.tgz",
|
||||
"integrity": "sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==",
|
||||
"license": "ISC",
|
||||
"engines": {
|
||||
"node": "^18.17.0 || >=20.5.0"
|
||||
}
|
||||
},
|
||||
"node_modules/natural-compare": {
|
||||
"version": "1.4.0",
|
||||
"resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
|
||||
@@ -5952,7 +6353,6 @@
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz",
|
||||
"integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
@@ -6555,6 +6955,15 @@
|
||||
"node": ">=16"
|
||||
}
|
||||
},
|
||||
"node_modules/run-async": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/run-async/-/run-async-3.0.0.tgz",
|
||||
"integrity": "sha512-540WwVDOMxA6dN6We19EcT9sc3hkXPw5mzRNGM3FkdN/vtE9NFvj5lFAPNwUDmJjXidm3v7TC1cTE7t17Ulm1Q==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=0.12.0"
|
||||
}
|
||||
},
|
||||
"node_modules/run-parallel": {
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
|
||||
@@ -6579,6 +6988,15 @@
|
||||
"queue-microtask": "^1.2.2"
|
||||
}
|
||||
},
|
||||
"node_modules/rxjs": {
|
||||
"version": "7.8.2",
|
||||
"resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.2.tgz",
|
||||
"integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==",
|
||||
"license": "Apache-2.0",
|
||||
"dependencies": {
|
||||
"tslib": "^2.1.0"
|
||||
}
|
||||
},
|
||||
"node_modules/safe-buffer": {
|
||||
"version": "5.2.1",
|
||||
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
|
||||
@@ -7124,7 +7542,6 @@
|
||||
"version": "0.0.33",
|
||||
"resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz",
|
||||
"integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"os-tmpdir": "~1.0.2"
|
||||
@@ -7179,6 +7596,12 @@
|
||||
"url": "https://github.com/sponsors/Borewit"
|
||||
}
|
||||
},
|
||||
"node_modules/tslib": {
|
||||
"version": "2.8.1",
|
||||
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
|
||||
"integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
|
||||
"license": "0BSD"
|
||||
},
|
||||
"node_modules/type-detect": {
|
||||
"version": "4.0.8",
|
||||
"resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
|
||||
@@ -7318,6 +7741,19 @@
|
||||
"node": ">= 0.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/uuid": {
|
||||
"version": "11.1.0",
|
||||
"resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz",
|
||||
"integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==",
|
||||
"funding": [
|
||||
"https://github.com/sponsors/broofa",
|
||||
"https://github.com/sponsors/ctavan"
|
||||
],
|
||||
"license": "MIT",
|
||||
"bin": {
|
||||
"uuid": "dist/esm/bin/uuid"
|
||||
}
|
||||
},
|
||||
"node_modules/v8-to-istanbul": {
|
||||
"version": "9.3.0",
|
||||
"resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz",
|
||||
@@ -7556,6 +7992,18 @@
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/yoctocolors-cjs": {
|
||||
"version": "2.1.2",
|
||||
"resolved": "https://registry.npmjs.org/yoctocolors-cjs/-/yoctocolors-cjs-2.1.2.tgz",
|
||||
"integrity": "sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/zod": {
|
||||
"version": "3.24.2",
|
||||
"resolved": "https://registry.npmjs.org/zod/-/zod-3.24.2.tgz",
|
||||
|
||||
11
package.json
11
package.json
@@ -7,6 +7,7 @@
|
||||
"bin": {
|
||||
"task-master": "bin/task-master.js",
|
||||
"task-master-init": "bin/task-master-init.js",
|
||||
"task-master-mcp": "mcp-server/server.js",
|
||||
"task-master-mcp-server": "mcp-server/server.js"
|
||||
},
|
||||
"scripts": {
|
||||
@@ -16,10 +17,11 @@
|
||||
"test:coverage": "node --experimental-vm-modules node_modules/.bin/jest --coverage",
|
||||
"prepare-package": "node scripts/prepare-package.js",
|
||||
"prepublishOnly": "npm run prepare-package",
|
||||
"prepare": "chmod +x bin/task-master.js bin/task-master-init.js",
|
||||
"prepare": "chmod +x bin/task-master.js bin/task-master-init.js mcp-server/server.js",
|
||||
"changeset": "changeset",
|
||||
"release": "changeset publish",
|
||||
"inspector": "CLIENT_PORT=8888 SERVER_PORT=9000 npx @modelcontextprotocol/inspector node mcp-server/server.js"
|
||||
"inspector": "CLIENT_PORT=8888 SERVER_PORT=9000 npx @modelcontextprotocol/inspector node mcp-server/server.js",
|
||||
"mcp-server": "node mcp-server/server.js"
|
||||
},
|
||||
"keywords": [
|
||||
"claude",
|
||||
@@ -37,7 +39,6 @@
|
||||
"license": "MIT WITH Commons-Clause",
|
||||
"dependencies": {
|
||||
"@anthropic-ai/sdk": "^0.39.0",
|
||||
"@model-context-protocol/sdk": "^1.20.5",
|
||||
"boxen": "^8.0.1",
|
||||
"chalk": "^4.1.2",
|
||||
"cli-table3": "^0.6.5",
|
||||
@@ -50,10 +51,12 @@
|
||||
"fuse.js": "^7.0.0",
|
||||
"gradient-string": "^3.0.0",
|
||||
"helmet": "^8.1.0",
|
||||
"inquirer": "^12.5.0",
|
||||
"jsonwebtoken": "^9.0.2",
|
||||
"lru-cache": "^10.2.0",
|
||||
"openai": "^4.89.0",
|
||||
"ora": "^8.2.0"
|
||||
"ora": "^8.2.0",
|
||||
"uuid": "^11.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=14.0.0"
|
||||
|
||||
@@ -212,6 +212,9 @@ function copyTemplateFile(templateName, targetPath, replacements = {}) {
|
||||
case 'dev_workflow.mdc':
|
||||
sourcePath = path.join(__dirname, '..', '.cursor', 'rules', 'dev_workflow.mdc');
|
||||
break;
|
||||
case 'taskmaster.mdc':
|
||||
sourcePath = path.join(__dirname, '..', '.cursor', 'rules', 'taskmaster.mdc');
|
||||
break;
|
||||
case 'cursor_rules.mdc':
|
||||
sourcePath = path.join(__dirname, '..', '.cursor', 'rules', 'cursor_rules.mdc');
|
||||
break;
|
||||
@@ -504,15 +507,24 @@ function createProjectStructure(projectName, projectDescription, projectVersion,
|
||||
},
|
||||
dependencies: {
|
||||
"@anthropic-ai/sdk": "^0.39.0",
|
||||
"chalk": "^5.3.0",
|
||||
"boxen": "^8.0.1",
|
||||
"chalk": "^4.1.2",
|
||||
"commander": "^11.1.0",
|
||||
"cli-table3": "^0.6.5",
|
||||
"cors": "^2.8.5",
|
||||
"dotenv": "^16.3.1",
|
||||
"openai": "^4.86.1",
|
||||
"figlet": "^1.7.0",
|
||||
"boxen": "^7.1.1",
|
||||
"gradient-string": "^2.0.2",
|
||||
"cli-table3": "^0.6.3",
|
||||
"ora": "^7.0.1"
|
||||
"express": "^4.21.2",
|
||||
"fastmcp": "^1.20.5",
|
||||
"figlet": "^1.8.0",
|
||||
"fuse.js": "^7.0.0",
|
||||
"gradient-string": "^3.0.0",
|
||||
"helmet": "^8.1.0",
|
||||
"inquirer": "^12.5.0",
|
||||
"jsonwebtoken": "^9.0.2",
|
||||
"lru-cache": "^10.2.0",
|
||||
"openai": "^4.89.0",
|
||||
"ora": "^8.2.0",
|
||||
"task-master-ai": "^0.9.31"
|
||||
}
|
||||
};
|
||||
|
||||
@@ -585,6 +597,9 @@ function createProjectStructure(projectName, projectDescription, projectVersion,
|
||||
// Copy dev_workflow.mdc
|
||||
copyTemplateFile('dev_workflow.mdc', path.join(targetDir, '.cursor', 'rules', 'dev_workflow.mdc'));
|
||||
|
||||
// Copy taskmaster.mdc
|
||||
copyTemplateFile('taskmaster.mdc', path.join(targetDir, '.cursor', 'rules', 'taskmaster.mdc'));
|
||||
|
||||
// Copy cursor_rules.mdc
|
||||
copyTemplateFile('cursor_rules.mdc', path.join(targetDir, '.cursor', 'rules', 'cursor_rules.mdc'));
|
||||
|
||||
@@ -694,9 +709,19 @@ function setupMCPConfiguration(targetDir, projectName) {
|
||||
"task-master-ai": {
|
||||
"command": "npx",
|
||||
"args": [
|
||||
"task-master-ai",
|
||||
"mcp-server"
|
||||
]
|
||||
"-y",
|
||||
"task-master-mcp-server"
|
||||
],
|
||||
"env": {
|
||||
"ANTHROPIC_API_KEY": "%ANTHROPIC_API_KEY%",
|
||||
"PERPLEXITY_API_KEY": "%PERPLEXITY_API_KEY%",
|
||||
"MODEL": "claude-3-7-sonnet-20250219",
|
||||
"PERPLEXITY_MODEL": "sonar-pro",
|
||||
"MAX_TOKENS": 64000,
|
||||
"TEMPERATURE": 0.3,
|
||||
"DEFAULT_SUBTASKS": 5,
|
||||
"DEFAULT_PRIORITY": "medium"
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -136,9 +136,13 @@ function handleClaudeError(error) {
|
||||
* @param {string} prdPath - Path to the PRD file
|
||||
* @param {number} numTasks - Number of tasks to generate
|
||||
* @param {number} retryCount - Retry count
|
||||
* @param {Object} options - Options object containing:
|
||||
* - reportProgress: Function to report progress to MCP server (optional)
|
||||
* - mcpLog: MCP logger object (optional)
|
||||
* - session: Session object from MCP server (optional)
|
||||
* @returns {Object} Claude's response
|
||||
*/
|
||||
async function callClaude(prdContent, prdPath, numTasks, retryCount = 0) {
|
||||
async function callClaude(prdContent, prdPath, numTasks, retryCount = 0, { reportProgress, mcpLog, session } = {}) {
|
||||
try {
|
||||
log('info', 'Calling Claude...');
|
||||
|
||||
@@ -167,6 +171,9 @@ Guidelines:
|
||||
6. Set appropriate dependency IDs (a task can only depend on tasks with lower IDs)
|
||||
7. Assign priority (high/medium/low) based on criticality and dependency order
|
||||
8. Include detailed implementation guidance in the "details" field
|
||||
9. If the PRD contains specific requirements for libraries, database schemas, frameworks, tech stacks, or any other implementation details, STRICTLY ADHERE to these requirements in your task breakdown and do not discard them under any circumstance
|
||||
10. Focus on filling in any gaps left by the PRD or areas that aren't fully specified, while preserving all explicit requirements
|
||||
11. Always aim to provide the most direct path to implementation, avoiding over-engineering or roundabout approaches
|
||||
|
||||
Expected output format:
|
||||
{
|
||||
@@ -190,7 +197,7 @@ Expected output format:
|
||||
Important: Your response must be valid JSON only, with no additional explanation or comments.`;
|
||||
|
||||
// Use streaming request to handle large responses and show progress
|
||||
return await handleStreamingRequest(prdContent, prdPath, numTasks, CONFIG.maxTokens, systemPrompt);
|
||||
return await handleStreamingRequest(prdContent, prdPath, numTasks, CONFIG.maxTokens, systemPrompt, { reportProgress, mcpLog, session } = {});
|
||||
} catch (error) {
|
||||
// Get user-friendly error message
|
||||
const userMessage = handleClaudeError(error);
|
||||
@@ -224,19 +231,24 @@ Important: Your response must be valid JSON only, with no additional explanation
|
||||
* @param {number} numTasks - Number of tasks to generate
|
||||
* @param {number} maxTokens - Maximum tokens
|
||||
* @param {string} systemPrompt - System prompt
|
||||
* @param {Object} options - Options object containing:
|
||||
* - reportProgress: Function to report progress to MCP server (optional)
|
||||
* - mcpLog: MCP logger object (optional)
|
||||
* - session: Session object from MCP server (optional)
|
||||
* @returns {Object} Claude's response
|
||||
*/
|
||||
async function handleStreamingRequest(prdContent, prdPath, numTasks, maxTokens, systemPrompt) {
|
||||
async function handleStreamingRequest(prdContent, prdPath, numTasks, maxTokens, systemPrompt, { reportProgress, mcpLog, session } = {}) {
|
||||
const loadingIndicator = startLoadingIndicator('Generating tasks from PRD...');
|
||||
if (reportProgress) { await reportProgress({ progress: 0 }); }
|
||||
let responseText = '';
|
||||
let streamingInterval = null;
|
||||
|
||||
try {
|
||||
// Use streaming for handling large responses
|
||||
const stream = await anthropic.messages.create({
|
||||
model: CONFIG.model,
|
||||
max_tokens: maxTokens,
|
||||
temperature: CONFIG.temperature,
|
||||
model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
|
||||
max_tokens: session?.env?.MAX_TOKENS || maxTokens,
|
||||
temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
|
||||
system: systemPrompt,
|
||||
messages: [
|
||||
{
|
||||
@@ -261,6 +273,12 @@ async function handleStreamingRequest(prdContent, prdPath, numTasks, maxTokens,
|
||||
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
|
||||
responseText += chunk.delta.text;
|
||||
}
|
||||
if (reportProgress) {
|
||||
await reportProgress({ progress: (responseText.length / maxTokens) * 100 });
|
||||
}
|
||||
if (mcpLog) {
|
||||
mcpLog.info(`Progress: ${responseText.length / maxTokens * 100}%`);
|
||||
}
|
||||
}
|
||||
|
||||
if (streamingInterval) clearInterval(streamingInterval);
|
||||
@@ -355,9 +373,13 @@ function processClaudeResponse(textContent, numTasks, retryCount, prdContent, pr
|
||||
* @param {number} numSubtasks - Number of subtasks to generate
|
||||
* @param {number} nextSubtaskId - Next subtask ID
|
||||
* @param {string} additionalContext - Additional context
|
||||
* @param {Object} options - Options object containing:
|
||||
* - reportProgress: Function to report progress to MCP server (optional)
|
||||
* - mcpLog: MCP logger object (optional)
|
||||
* - session: Session object from MCP server (optional)
|
||||
* @returns {Array} Generated subtasks
|
||||
*/
|
||||
async function generateSubtasks(task, numSubtasks, nextSubtaskId, additionalContext = '') {
|
||||
async function generateSubtasks(task, numSubtasks, nextSubtaskId, additionalContext = '', { reportProgress, mcpLog, session } = {}) {
|
||||
try {
|
||||
log('info', `Generating ${numSubtasks} subtasks for task ${task.id}: ${task.title}`);
|
||||
|
||||
@@ -419,11 +441,13 @@ Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use
|
||||
dotCount = (dotCount + 1) % 4;
|
||||
}, 500);
|
||||
|
||||
// TODO: MOVE THIS TO THE STREAM REQUEST FUNCTION (DRY)
|
||||
|
||||
// Use streaming API call
|
||||
const stream = await anthropic.messages.create({
|
||||
model: CONFIG.model,
|
||||
max_tokens: CONFIG.maxTokens,
|
||||
temperature: CONFIG.temperature,
|
||||
model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
|
||||
max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
|
||||
temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
|
||||
system: systemPrompt,
|
||||
messages: [
|
||||
{
|
||||
@@ -439,6 +463,12 @@ Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use
|
||||
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
|
||||
responseText += chunk.delta.text;
|
||||
}
|
||||
if (reportProgress) {
|
||||
await reportProgress({ progress: (responseText.length / CONFIG.maxTokens) * 100 });
|
||||
}
|
||||
if (mcpLog) {
|
||||
mcpLog.info(`Progress: ${responseText.length / CONFIG.maxTokens * 100}%`);
|
||||
}
|
||||
}
|
||||
|
||||
if (streamingInterval) clearInterval(streamingInterval);
|
||||
@@ -464,15 +494,19 @@ Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use
|
||||
* @param {number} numSubtasks - Number of subtasks to generate
|
||||
* @param {number} nextSubtaskId - Next subtask ID
|
||||
* @param {string} additionalContext - Additional context
|
||||
* @param {Object} options - Options object containing:
|
||||
* - reportProgress: Function to report progress to MCP server (optional)
|
||||
* - mcpLog: MCP logger object (optional)
|
||||
* - session: Session object from MCP server (optional)
|
||||
* @returns {Array} Generated subtasks
|
||||
*/
|
||||
async function generateSubtasksWithPerplexity(task, numSubtasks = 3, nextSubtaskId = 1, additionalContext = '') {
|
||||
async function generateSubtasksWithPerplexity(task, numSubtasks = 3, nextSubtaskId = 1, additionalContext = '', { reportProgress, mcpLog, session } = {}) {
|
||||
try {
|
||||
// First, perform research to get context
|
||||
log('info', `Researching context for task ${task.id}: ${task.title}`);
|
||||
const perplexityClient = getPerplexityClient();
|
||||
|
||||
const PERPLEXITY_MODEL = process.env.PERPLEXITY_MODEL || 'sonar-pro';
|
||||
const PERPLEXITY_MODEL = process.env.PERPLEXITY_MODEL || session?.env?.PERPLEXITY_MODEL || 'sonar-pro';
|
||||
const researchLoadingIndicator = startLoadingIndicator('Researching best practices with Perplexity AI...');
|
||||
|
||||
// Formulate research query based on task
|
||||
@@ -566,9 +600,9 @@ Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use
|
||||
|
||||
// Use streaming API call
|
||||
const stream = await anthropic.messages.create({
|
||||
model: CONFIG.model,
|
||||
max_tokens: CONFIG.maxTokens,
|
||||
temperature: CONFIG.temperature,
|
||||
model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
|
||||
max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
|
||||
temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
|
||||
system: systemPrompt,
|
||||
messages: [
|
||||
{
|
||||
@@ -584,6 +618,12 @@ Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use
|
||||
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
|
||||
responseText += chunk.delta.text;
|
||||
}
|
||||
if (reportProgress) {
|
||||
await reportProgress({ progress: (responseText.length / CONFIG.maxTokens) * 100 });
|
||||
}
|
||||
if (mcpLog) {
|
||||
mcpLog.info(`Progress: ${responseText.length / CONFIG.maxTokens * 100}%`);
|
||||
}
|
||||
}
|
||||
|
||||
if (streamingInterval) clearInterval(streamingInterval);
|
||||
|
||||
@@ -9,6 +9,7 @@ import chalk from 'chalk';
|
||||
import boxen from 'boxen';
|
||||
import fs from 'fs';
|
||||
import https from 'https';
|
||||
import inquirer from 'inquirer';
|
||||
|
||||
import { CONFIG, log, readJSON } from './utils.js';
|
||||
import {
|
||||
@@ -25,7 +26,10 @@ import {
|
||||
removeSubtask,
|
||||
analyzeTaskComplexity,
|
||||
updateTaskById,
|
||||
updateSubtaskById
|
||||
updateSubtaskById,
|
||||
removeTask,
|
||||
findTaskById,
|
||||
taskExists
|
||||
} from './task-manager.js';
|
||||
|
||||
import {
|
||||
@@ -42,7 +46,9 @@ import {
|
||||
displayTaskById,
|
||||
displayComplexityReport,
|
||||
getStatusWithColor,
|
||||
confirmTaskOverwrite
|
||||
confirmTaskOverwrite,
|
||||
startLoadingIndicator,
|
||||
stopLoadingIndicator
|
||||
} from './ui.js';
|
||||
|
||||
/**
|
||||
@@ -864,6 +870,119 @@ function registerCommands(programInstance) {
|
||||
process.exit(0);
|
||||
});
|
||||
|
||||
// remove-task command
|
||||
programInstance
|
||||
.command('remove-task')
|
||||
.description('Remove a task or subtask permanently')
|
||||
.option('-i, --id <id>', 'ID of the task or subtask to remove (e.g., "5" or "5.2")')
|
||||
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
|
||||
.option('-y, --yes', 'Skip confirmation prompt', false)
|
||||
.action(async (options) => {
|
||||
const tasksPath = options.file;
|
||||
const taskId = options.id;
|
||||
|
||||
if (!taskId) {
|
||||
console.error(chalk.red('Error: Task ID is required'));
|
||||
console.error(chalk.yellow('Usage: task-master remove-task --id=<taskId>'));
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
try {
|
||||
// Check if the task exists
|
||||
const data = readJSON(tasksPath);
|
||||
if (!data || !data.tasks) {
|
||||
console.error(chalk.red(`Error: No valid tasks found in ${tasksPath}`));
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (!taskExists(data.tasks, taskId)) {
|
||||
console.error(chalk.red(`Error: Task with ID ${taskId} not found`));
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Load task for display
|
||||
const task = findTaskById(data.tasks, taskId);
|
||||
|
||||
// Skip confirmation if --yes flag is provided
|
||||
if (!options.yes) {
|
||||
// Display task information
|
||||
console.log();
|
||||
console.log(chalk.red.bold('⚠️ WARNING: This will permanently delete the following task:'));
|
||||
console.log();
|
||||
|
||||
if (typeof taskId === 'string' && taskId.includes('.')) {
|
||||
// It's a subtask
|
||||
const [parentId, subtaskId] = taskId.split('.');
|
||||
console.log(chalk.white.bold(`Subtask ${taskId}: ${task.title}`));
|
||||
console.log(chalk.gray(`Parent Task: ${task.parentTask.id} - ${task.parentTask.title}`));
|
||||
} else {
|
||||
// It's a main task
|
||||
console.log(chalk.white.bold(`Task ${taskId}: ${task.title}`));
|
||||
|
||||
// Show if it has subtasks
|
||||
if (task.subtasks && task.subtasks.length > 0) {
|
||||
console.log(chalk.yellow(`⚠️ This task has ${task.subtasks.length} subtasks that will also be deleted!`));
|
||||
}
|
||||
|
||||
// Show if other tasks depend on it
|
||||
const dependentTasks = data.tasks.filter(t =>
|
||||
t.dependencies && t.dependencies.includes(parseInt(taskId, 10)));
|
||||
|
||||
if (dependentTasks.length > 0) {
|
||||
console.log(chalk.yellow(`⚠️ Warning: ${dependentTasks.length} other tasks depend on this task!`));
|
||||
console.log(chalk.yellow('These dependencies will be removed:'));
|
||||
dependentTasks.forEach(t => {
|
||||
console.log(chalk.yellow(` - Task ${t.id}: ${t.title}`));
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
console.log();
|
||||
|
||||
// Prompt for confirmation
|
||||
const { confirm } = await inquirer.prompt([
|
||||
{
|
||||
type: 'confirm',
|
||||
name: 'confirm',
|
||||
message: chalk.red.bold('Are you sure you want to permanently delete this task?'),
|
||||
default: false
|
||||
}
|
||||
]);
|
||||
|
||||
if (!confirm) {
|
||||
console.log(chalk.blue('Task deletion cancelled.'));
|
||||
process.exit(0);
|
||||
}
|
||||
}
|
||||
|
||||
const indicator = startLoadingIndicator('Removing task...');
|
||||
|
||||
// Remove the task
|
||||
const result = await removeTask(tasksPath, taskId);
|
||||
|
||||
stopLoadingIndicator(indicator);
|
||||
|
||||
// Display success message with appropriate color based on task or subtask
|
||||
if (typeof taskId === 'string' && taskId.includes('.')) {
|
||||
// It was a subtask
|
||||
console.log(boxen(
|
||||
chalk.green(`Subtask ${taskId} has been successfully removed`),
|
||||
{ padding: 1, borderColor: 'green', borderStyle: 'round' }
|
||||
));
|
||||
} else {
|
||||
// It was a main task
|
||||
console.log(boxen(
|
||||
chalk.green(`Task ${taskId} has been successfully removed`),
|
||||
{ padding: 1, borderColor: 'green', borderStyle: 'round' }
|
||||
));
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
console.error(chalk.red(`Error: ${error.message || 'An unknown error occurred'}`));
|
||||
process.exit(1);
|
||||
}
|
||||
});
|
||||
|
||||
// Add more commands as needed...
|
||||
|
||||
return programInstance;
|
||||
|
||||
@@ -20,7 +20,9 @@ import {
|
||||
findTaskById,
|
||||
readComplexityReport,
|
||||
findTaskInComplexityReport,
|
||||
truncate
|
||||
truncate,
|
||||
enableSilentMode,
|
||||
disableSilentMode
|
||||
} from './utils.js';
|
||||
|
||||
import {
|
||||
@@ -49,19 +51,19 @@ import {
|
||||
|
||||
// Initialize Anthropic client
|
||||
const anthropic = new Anthropic({
|
||||
apiKey: process.env.ANTHROPIC_API_KEY,
|
||||
apiKey: process.env.ANTHROPIC_API_KEY || session?.env?.ANTHROPIC_API_KEY,
|
||||
});
|
||||
|
||||
// Import perplexity if available
|
||||
let perplexity;
|
||||
|
||||
try {
|
||||
if (process.env.PERPLEXITY_API_KEY) {
|
||||
if (process.env.PERPLEXITY_API_KEY || session?.env?.PERPLEXITY_API_KEY) {
|
||||
// Using the existing approach from ai-services.js
|
||||
const OpenAI = (await import('openai')).default;
|
||||
|
||||
perplexity = new OpenAI({
|
||||
apiKey: process.env.PERPLEXITY_API_KEY,
|
||||
apiKey: process.env.PERPLEXITY_API_KEY || session?.env?.PERPLEXITY_API_KEY,
|
||||
baseURL: 'https://api.perplexity.ai',
|
||||
});
|
||||
|
||||
@@ -77,8 +79,11 @@ try {
|
||||
* @param {string} prdPath - Path to the PRD file
|
||||
* @param {string} tasksPath - Path to the tasks.json file
|
||||
* @param {number} numTasks - Number of tasks to generate
|
||||
* @param {function} reportProgress - Function to report progress to MCP server (optional)
|
||||
* @param {Object} mcpLog - MCP logger object (optional)
|
||||
* @param {Object} session - Session object from MCP server (optional)
|
||||
*/
|
||||
async function parsePRD(prdPath, tasksPath, numTasks) {
|
||||
async function parsePRD(prdPath, tasksPath, numTasks, { reportProgress, mcpLog, session } = {}) {
|
||||
try {
|
||||
log('info', `Parsing PRD file: ${prdPath}`);
|
||||
|
||||
@@ -86,22 +91,27 @@ async function parsePRD(prdPath, tasksPath, numTasks) {
|
||||
const prdContent = fs.readFileSync(prdPath, 'utf8');
|
||||
|
||||
// Call Claude to generate tasks
|
||||
const tasksData = await callClaude(prdContent, prdPath, numTasks);
|
||||
const tasksData = await callClaude(prdContent, prdPath, numTasks, { reportProgress, mcpLog, session } = {});
|
||||
|
||||
// Create the directory if it doesn't exist
|
||||
const tasksDir = path.dirname(tasksPath);
|
||||
if (!fs.existsSync(tasksDir)) {
|
||||
fs.mkdirSync(tasksDir, { recursive: true });
|
||||
}
|
||||
|
||||
// Write the tasks to the file
|
||||
writeJSON(tasksPath, tasksData);
|
||||
|
||||
log('success', `Successfully generated ${tasksData.tasks.length} tasks from PRD`);
|
||||
log('info', `Tasks saved to: ${tasksPath}`);
|
||||
|
||||
// Generate individual task files
|
||||
if (reportProgress && mcpLog) {
|
||||
// Enable silent mode when being called from MCP server
|
||||
enableSilentMode();
|
||||
await generateTaskFiles(tasksPath, tasksDir);
|
||||
disableSilentMode();
|
||||
} else {
|
||||
await generateTaskFiles(tasksPath, tasksDir);
|
||||
}
|
||||
|
||||
console.log(boxen(
|
||||
chalk.green(`Successfully generated ${tasksData.tasks.length} tasks from PRD`),
|
||||
@@ -132,13 +142,16 @@ async function parsePRD(prdPath, tasksPath, numTasks) {
|
||||
* @param {number} fromId - Task ID to start updating from
|
||||
* @param {string} prompt - Prompt with new context
|
||||
* @param {boolean} useResearch - Whether to use Perplexity AI for research
|
||||
* @param {function} reportProgress - Function to report progress to MCP server (optional)
|
||||
* @param {Object} mcpLog - MCP logger object (optional)
|
||||
* @param {Object} session - Session object from MCP server (optional)
|
||||
*/
|
||||
async function updateTasks(tasksPath, fromId, prompt, useResearch = false) {
|
||||
async function updateTasks(tasksPath, fromId, prompt, useResearch = false, { reportProgress, mcpLog, session } = {}) {
|
||||
try {
|
||||
log('info', `Updating tasks from ID ${fromId} with prompt: "${prompt}"`);
|
||||
|
||||
// Validate research flag
|
||||
if (useResearch && (!perplexity || !process.env.PERPLEXITY_API_KEY)) {
|
||||
if (useResearch && (!perplexity || !process.env.PERPLEXITY_API_KEY || session?.env?.PERPLEXITY_API_KEY)) {
|
||||
log('warn', 'Perplexity AI is not available. Falling back to Claude AI.');
|
||||
console.log(chalk.yellow('Perplexity AI is not available (API key may be missing). Falling back to Claude AI.'));
|
||||
useResearch = false;
|
||||
@@ -224,7 +237,7 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
|
||||
log('info', 'Using Perplexity AI for research-backed task updates');
|
||||
|
||||
// Call Perplexity AI using format consistent with ai-services.js
|
||||
const perplexityModel = process.env.PERPLEXITY_MODEL || 'sonar-pro';
|
||||
const perplexityModel = process.env.PERPLEXITY_MODEL || session?.env?.PERPLEXITY_MODEL || 'sonar-pro';
|
||||
const result = await perplexity.chat.completions.create({
|
||||
model: perplexityModel,
|
||||
messages: [
|
||||
@@ -245,8 +258,8 @@ IMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "statu
|
||||
Return only the updated tasks as a valid JSON array.`
|
||||
}
|
||||
],
|
||||
temperature: parseFloat(process.env.TEMPERATURE || CONFIG.temperature),
|
||||
max_tokens: parseInt(process.env.MAX_TOKENS || CONFIG.maxTokens),
|
||||
temperature: parseFloat(process.env.TEMPERATURE || session?.env?.TEMPERATURE || CONFIG.temperature),
|
||||
max_tokens: parseInt(process.env.MAX_TOKENS || session?.env?.MAX_TOKENS || CONFIG.maxTokens),
|
||||
});
|
||||
|
||||
const responseText = result.choices[0].message.content;
|
||||
@@ -278,9 +291,9 @@ Return only the updated tasks as a valid JSON array.`
|
||||
|
||||
// Use streaming API call
|
||||
const stream = await anthropic.messages.create({
|
||||
model: CONFIG.model,
|
||||
max_tokens: CONFIG.maxTokens,
|
||||
temperature: CONFIG.temperature,
|
||||
model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
|
||||
max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
|
||||
temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
|
||||
system: systemPrompt,
|
||||
messages: [
|
||||
{
|
||||
@@ -304,6 +317,13 @@ Return only the updated tasks as a valid JSON array.`
|
||||
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
|
||||
responseText += chunk.delta.text;
|
||||
}
|
||||
if (reportProgress) {
|
||||
await reportProgress({ progress: (responseText.length / CONFIG.maxTokens) * 100 });
|
||||
}
|
||||
|
||||
if (mcpLog) {
|
||||
mcpLog.info(`Progress: ${responseText.length / CONFIG.maxTokens * 100}%`);
|
||||
}
|
||||
}
|
||||
|
||||
if (streamingInterval) clearInterval(streamingInterval);
|
||||
@@ -366,9 +386,12 @@ Return only the updated tasks as a valid JSON array.`
|
||||
* @param {number} taskId - Task ID to update
|
||||
* @param {string} prompt - Prompt with new context
|
||||
* @param {boolean} useResearch - Whether to use Perplexity AI for research
|
||||
* @param {function} reportProgress - Function to report progress to MCP server (optional)
|
||||
* @param {Object} mcpLog - MCP logger object (optional)
|
||||
* @param {Object} session - Session object from MCP server (optional)
|
||||
* @returns {Object} - Updated task data or null if task wasn't updated
|
||||
*/
|
||||
async function updateTaskById(tasksPath, taskId, prompt, useResearch = false) {
|
||||
async function updateTaskById(tasksPath, taskId, prompt, useResearch = false, { reportProgress, mcpLog, session } = {}) {
|
||||
try {
|
||||
log('info', `Updating single task ${taskId} with prompt: "${prompt}"`);
|
||||
|
||||
@@ -383,7 +406,7 @@ async function updateTaskById(tasksPath, taskId, prompt, useResearch = false) {
|
||||
}
|
||||
|
||||
// Validate research flag
|
||||
if (useResearch && (!perplexity || !process.env.PERPLEXITY_API_KEY)) {
|
||||
if (useResearch && (!perplexity || !process.env.PERPLEXITY_API_KEY || session?.env?.PERPLEXITY_API_KEY)) {
|
||||
log('warn', 'Perplexity AI is not available. Falling back to Claude AI.');
|
||||
console.log(chalk.yellow('Perplexity AI is not available (API key may be missing). Falling back to Claude AI.'));
|
||||
useResearch = false;
|
||||
@@ -484,13 +507,13 @@ The changes described in the prompt should be thoughtfully applied to make the t
|
||||
log('info', 'Using Perplexity AI for research-backed task update');
|
||||
|
||||
// Verify Perplexity API key exists
|
||||
if (!process.env.PERPLEXITY_API_KEY) {
|
||||
if (!process.env.PERPLEXITY_API_KEY || session?.env?.PERPLEXITY_API_KEY) {
|
||||
throw new Error('PERPLEXITY_API_KEY environment variable is missing but --research flag was used.');
|
||||
}
|
||||
|
||||
try {
|
||||
// Call Perplexity AI
|
||||
const perplexityModel = process.env.PERPLEXITY_MODEL || 'sonar-pro';
|
||||
const perplexityModel = process.env.PERPLEXITY_MODEL || session?.env?.PERPLEXITY_MODEL || 'sonar-pro';
|
||||
const result = await perplexity.chat.completions.create({
|
||||
model: perplexityModel,
|
||||
messages: [
|
||||
@@ -511,8 +534,8 @@ IMPORTANT: In the task JSON above, any subtasks with "status": "done" or "status
|
||||
Return only the updated task as a valid JSON object.`
|
||||
}
|
||||
],
|
||||
temperature: parseFloat(process.env.TEMPERATURE || CONFIG.temperature),
|
||||
max_tokens: parseInt(process.env.MAX_TOKENS || CONFIG.maxTokens),
|
||||
temperature: parseFloat(process.env.TEMPERATURE || session?.env?.TEMPERATURE || CONFIG.temperature),
|
||||
max_tokens: parseInt(process.env.MAX_TOKENS || session?.env?.MAX_TOKENS || CONFIG.maxTokens),
|
||||
});
|
||||
|
||||
const responseText = result.choices[0].message.content;
|
||||
@@ -542,7 +565,7 @@ Return only the updated task as a valid JSON object.`
|
||||
|
||||
try {
|
||||
// Verify Anthropic API key exists
|
||||
if (!process.env.ANTHROPIC_API_KEY) {
|
||||
if (!process.env.ANTHROPIC_API_KEY || session?.env?.ANTHROPIC_API_KEY) {
|
||||
throw new Error('ANTHROPIC_API_KEY environment variable is missing. Required for task updates.');
|
||||
}
|
||||
|
||||
@@ -557,9 +580,9 @@ Return only the updated task as a valid JSON object.`
|
||||
|
||||
// Use streaming API call
|
||||
const stream = await anthropic.messages.create({
|
||||
model: CONFIG.model,
|
||||
max_tokens: CONFIG.maxTokens,
|
||||
temperature: CONFIG.temperature,
|
||||
model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
|
||||
max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
|
||||
temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
|
||||
system: systemPrompt,
|
||||
messages: [
|
||||
{
|
||||
@@ -583,6 +606,12 @@ Return only the updated task as a valid JSON object.`
|
||||
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
|
||||
responseText += chunk.delta.text;
|
||||
}
|
||||
if (reportProgress) {
|
||||
await reportProgress({ progress: (responseText.length / CONFIG.maxTokens) * 100 });
|
||||
}
|
||||
if (mcpLog) {
|
||||
mcpLog.info(`Progress: ${responseText.length / CONFIG.maxTokens * 100}%`);
|
||||
}
|
||||
}
|
||||
|
||||
if (streamingInterval) clearInterval(streamingInterval);
|
||||
@@ -742,6 +771,7 @@ Return only the updated task as a valid JSON object.`
|
||||
function generateTaskFiles(tasksPath, outputDir) {
|
||||
try {
|
||||
log('info', `Reading tasks from ${tasksPath}...`);
|
||||
|
||||
const data = readJSON(tasksPath);
|
||||
if (!data || !data.tasks) {
|
||||
throw new Error(`No valid tasks found in ${tasksPath}`);
|
||||
@@ -1014,22 +1044,33 @@ function listTasks(tasksPath, statusFilter, withSubtasks = false, outputFormat =
|
||||
task.status === 'done' || task.status === 'completed').length;
|
||||
const completionPercentage = totalTasks > 0 ? (completedTasks / totalTasks) * 100 : 0;
|
||||
|
||||
// Count statuses
|
||||
// Count statuses for tasks
|
||||
const doneCount = completedTasks;
|
||||
const inProgressCount = data.tasks.filter(task => task.status === 'in-progress').length;
|
||||
const pendingCount = data.tasks.filter(task => task.status === 'pending').length;
|
||||
const blockedCount = data.tasks.filter(task => task.status === 'blocked').length;
|
||||
const deferredCount = data.tasks.filter(task => task.status === 'deferred').length;
|
||||
const cancelledCount = data.tasks.filter(task => task.status === 'cancelled').length;
|
||||
|
||||
// Count subtasks
|
||||
// Count subtasks and their statuses
|
||||
let totalSubtasks = 0;
|
||||
let completedSubtasks = 0;
|
||||
let inProgressSubtasks = 0;
|
||||
let pendingSubtasks = 0;
|
||||
let blockedSubtasks = 0;
|
||||
let deferredSubtasks = 0;
|
||||
let cancelledSubtasks = 0;
|
||||
|
||||
data.tasks.forEach(task => {
|
||||
if (task.subtasks && task.subtasks.length > 0) {
|
||||
totalSubtasks += task.subtasks.length;
|
||||
completedSubtasks += task.subtasks.filter(st =>
|
||||
st.status === 'done' || st.status === 'completed').length;
|
||||
inProgressSubtasks += task.subtasks.filter(st => st.status === 'in-progress').length;
|
||||
pendingSubtasks += task.subtasks.filter(st => st.status === 'pending').length;
|
||||
blockedSubtasks += task.subtasks.filter(st => st.status === 'blocked').length;
|
||||
deferredSubtasks += task.subtasks.filter(st => st.status === 'deferred').length;
|
||||
cancelledSubtasks += task.subtasks.filter(st => st.status === 'cancelled').length;
|
||||
}
|
||||
});
|
||||
|
||||
@@ -1064,10 +1105,16 @@ function listTasks(tasksPath, statusFilter, withSubtasks = false, outputFormat =
|
||||
pending: pendingCount,
|
||||
blocked: blockedCount,
|
||||
deferred: deferredCount,
|
||||
cancelled: cancelledCount,
|
||||
completionPercentage,
|
||||
subtasks: {
|
||||
total: totalSubtasks,
|
||||
completed: completedSubtasks,
|
||||
inProgress: inProgressSubtasks,
|
||||
pending: pendingSubtasks,
|
||||
blocked: blockedSubtasks,
|
||||
deferred: deferredSubtasks,
|
||||
cancelled: cancelledSubtasks,
|
||||
completionPercentage: subtaskCompletionPercentage
|
||||
}
|
||||
}
|
||||
@@ -1076,9 +1123,26 @@ function listTasks(tasksPath, statusFilter, withSubtasks = false, outputFormat =
|
||||
|
||||
// ... existing code for text output ...
|
||||
|
||||
// Create progress bars
|
||||
const taskProgressBar = createProgressBar(completionPercentage, 30);
|
||||
const subtaskProgressBar = createProgressBar(subtaskCompletionPercentage, 30);
|
||||
// Calculate status breakdowns as percentages of total
|
||||
const taskStatusBreakdown = {
|
||||
'in-progress': totalTasks > 0 ? (inProgressCount / totalTasks) * 100 : 0,
|
||||
'pending': totalTasks > 0 ? (pendingCount / totalTasks) * 100 : 0,
|
||||
'blocked': totalTasks > 0 ? (blockedCount / totalTasks) * 100 : 0,
|
||||
'deferred': totalTasks > 0 ? (deferredCount / totalTasks) * 100 : 0,
|
||||
'cancelled': totalTasks > 0 ? (cancelledCount / totalTasks) * 100 : 0
|
||||
};
|
||||
|
||||
const subtaskStatusBreakdown = {
|
||||
'in-progress': totalSubtasks > 0 ? (inProgressSubtasks / totalSubtasks) * 100 : 0,
|
||||
'pending': totalSubtasks > 0 ? (pendingSubtasks / totalSubtasks) * 100 : 0,
|
||||
'blocked': totalSubtasks > 0 ? (blockedSubtasks / totalSubtasks) * 100 : 0,
|
||||
'deferred': totalSubtasks > 0 ? (deferredSubtasks / totalSubtasks) * 100 : 0,
|
||||
'cancelled': totalSubtasks > 0 ? (cancelledSubtasks / totalSubtasks) * 100 : 0
|
||||
};
|
||||
|
||||
// Create progress bars with status breakdowns
|
||||
const taskProgressBar = createProgressBar(completionPercentage, 30, taskStatusBreakdown);
|
||||
const subtaskProgressBar = createProgressBar(subtaskCompletionPercentage, 30, subtaskStatusBreakdown);
|
||||
|
||||
// Calculate dependency statistics
|
||||
const completedTaskIds = new Set(data.tasks.filter(t =>
|
||||
@@ -1163,9 +1227,9 @@ function listTasks(tasksPath, statusFilter, withSubtasks = false, outputFormat =
|
||||
const projectDashboardContent =
|
||||
chalk.white.bold('Project Dashboard') + '\n' +
|
||||
`Tasks Progress: ${chalk.greenBright(taskProgressBar)} ${completionPercentage.toFixed(0)}%\n` +
|
||||
`Done: ${chalk.green(doneCount)} In Progress: ${chalk.blue(inProgressCount)} Pending: ${chalk.yellow(pendingCount)} Blocked: ${chalk.red(blockedCount)} Deferred: ${chalk.gray(deferredCount)}\n\n` +
|
||||
`Done: ${chalk.green(doneCount)} In Progress: ${chalk.blue(inProgressCount)} Pending: ${chalk.yellow(pendingCount)} Blocked: ${chalk.red(blockedCount)} Deferred: ${chalk.gray(deferredCount)} Cancelled: ${chalk.gray(cancelledCount)}\n\n` +
|
||||
`Subtasks Progress: ${chalk.cyan(subtaskProgressBar)} ${subtaskCompletionPercentage.toFixed(0)}%\n` +
|
||||
`Completed: ${chalk.green(completedSubtasks)}/${totalSubtasks} Remaining: ${chalk.yellow(totalSubtasks - completedSubtasks)}\n\n` +
|
||||
`Completed: ${chalk.green(completedSubtasks)}/${totalSubtasks} In Progress: ${chalk.blue(inProgressSubtasks)} Pending: ${chalk.yellow(pendingSubtasks)} Blocked: ${chalk.red(blockedSubtasks)} Deferred: ${chalk.gray(deferredSubtasks)} Cancelled: ${chalk.gray(cancelledSubtasks)}\n\n` +
|
||||
chalk.cyan.bold('Priority Breakdown:') + '\n' +
|
||||
`${chalk.red('•')} ${chalk.white('High priority:')} ${data.tasks.filter(t => t.priority === 'high').length}\n` +
|
||||
`${chalk.yellow('•')} ${chalk.white('Medium priority:')} ${data.tasks.filter(t => t.priority === 'medium').length}\n` +
|
||||
@@ -1454,7 +1518,8 @@ function listTasks(tasksPath, statusFilter, withSubtasks = false, outputFormat =
|
||||
'pending': chalk.yellow,
|
||||
'in-progress': chalk.blue,
|
||||
'deferred': chalk.gray,
|
||||
'blocked': chalk.red
|
||||
'blocked': chalk.red,
|
||||
'cancelled': chalk.gray
|
||||
};
|
||||
const statusColor = statusColors[status.toLowerCase()] || chalk.white;
|
||||
return `${chalk.cyan(`${nextTask.id}.${subtask.id}`)} [${statusColor(status)}] ${subtask.title}`;
|
||||
@@ -1999,11 +2064,22 @@ function clearSubtasks(tasksPath, taskIds) {
|
||||
* @param {string} prompt - Description of the task to add
|
||||
* @param {Array} dependencies - Task dependencies
|
||||
* @param {string} priority - Task priority
|
||||
* @param {function} reportProgress - Function to report progress to MCP server (optional)
|
||||
* @param {Object} mcpLog - MCP logger object (optional)
|
||||
* @param {Object} session - Session object from MCP server (optional)
|
||||
* @returns {number} The new task ID
|
||||
*/
|
||||
async function addTask(tasksPath, prompt, dependencies = [], priority = 'medium') {
|
||||
async function addTask(tasksPath, prompt, dependencies = [], priority = 'medium', { reportProgress, mcpLog, session } = {}, outputFormat = 'text') {
|
||||
// Only display banner and UI elements for text output (CLI)
|
||||
if (outputFormat === 'text') {
|
||||
displayBanner();
|
||||
|
||||
console.log(boxen(
|
||||
chalk.white.bold(`Creating New Task`),
|
||||
{ padding: 1, borderColor: 'blue', borderStyle: 'round', margin: { top: 1, bottom: 1 } }
|
||||
));
|
||||
}
|
||||
|
||||
// Read the existing tasks
|
||||
const data = readJSON(tasksPath);
|
||||
if (!data || !data.tasks) {
|
||||
@@ -2015,10 +2091,13 @@ async function addTask(tasksPath, prompt, dependencies = [], priority = 'medium'
|
||||
const highestId = Math.max(...data.tasks.map(t => t.id));
|
||||
const newTaskId = highestId + 1;
|
||||
|
||||
// Only show UI box for CLI mode
|
||||
if (outputFormat === 'text') {
|
||||
console.log(boxen(
|
||||
chalk.white.bold(`Creating New Task #${newTaskId}`),
|
||||
{ padding: 1, borderColor: 'blue', borderStyle: 'round', margin: { top: 1, bottom: 1 } }
|
||||
));
|
||||
}
|
||||
|
||||
// Validate dependencies before proceeding
|
||||
const invalidDeps = dependencies.filter(depId => {
|
||||
@@ -2068,8 +2147,11 @@ async function addTask(tasksPath, prompt, dependencies = [], priority = 'medium'
|
||||
|
||||
IMPORTANT: Return ONLY the JSON object, nothing else.`;
|
||||
|
||||
// Start the loading indicator
|
||||
const loadingIndicator = startLoadingIndicator('Generating new task with Claude AI...');
|
||||
// Start the loading indicator - only for text mode
|
||||
let loadingIndicator = null;
|
||||
if (outputFormat === 'text') {
|
||||
loadingIndicator = startLoadingIndicator('Generating new task with Claude AI...');
|
||||
}
|
||||
|
||||
let fullResponse = '';
|
||||
let streamingInterval = null;
|
||||
@@ -2077,31 +2159,40 @@ async function addTask(tasksPath, prompt, dependencies = [], priority = 'medium'
|
||||
try {
|
||||
// Call Claude with streaming enabled
|
||||
const stream = await anthropic.messages.create({
|
||||
max_tokens: CONFIG.maxTokens,
|
||||
model: CONFIG.model,
|
||||
temperature: CONFIG.temperature,
|
||||
max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
|
||||
model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
|
||||
temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
|
||||
messages: [{ role: "user", content: userPrompt }],
|
||||
system: systemPrompt,
|
||||
stream: true
|
||||
});
|
||||
|
||||
// Update loading indicator to show streaming progress
|
||||
// Update loading indicator to show streaming progress - only for text mode
|
||||
let dotCount = 0;
|
||||
if (outputFormat === 'text') {
|
||||
streamingInterval = setInterval(() => {
|
||||
readline.cursorTo(process.stdout, 0);
|
||||
process.stdout.write(`Receiving streaming response from Claude${'.'.repeat(dotCount)}`);
|
||||
dotCount = (dotCount + 1) % 4;
|
||||
}, 500);
|
||||
}
|
||||
|
||||
// Process the stream
|
||||
for await (const chunk of stream) {
|
||||
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
|
||||
fullResponse += chunk.delta.text;
|
||||
}
|
||||
|
||||
if (reportProgress) {
|
||||
await reportProgress({ progress: (fullResponse.length / CONFIG.maxTokens) * 100 });
|
||||
}
|
||||
if (mcpLog) {
|
||||
mcpLog.info(`Progress: ${fullResponse.length / CONFIG.maxTokens * 100}%`);
|
||||
}
|
||||
}
|
||||
|
||||
if (streamingInterval) clearInterval(streamingInterval);
|
||||
stopLoadingIndicator(loadingIndicator);
|
||||
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||
|
||||
log('info', "Completed streaming response from Claude API!");
|
||||
log('debug', `Streaming response length: ${fullResponse.length} characters`);
|
||||
@@ -2148,6 +2239,8 @@ async function addTask(tasksPath, prompt, dependencies = [], priority = 'medium'
|
||||
// Write the updated tasks back to the file
|
||||
writeJSON(tasksPath, data);
|
||||
|
||||
// Only show success messages for text mode (CLI)
|
||||
if (outputFormat === 'text') {
|
||||
// Show success message
|
||||
const successBox = boxen(
|
||||
chalk.green(`Successfully added new task #${newTaskId}:\n`) +
|
||||
@@ -2165,11 +2258,12 @@ async function addTask(tasksPath, prompt, dependencies = [], priority = 'medium'
|
||||
`${chalk.cyan('3.')} Run ${chalk.yellow('task-master list --with-subtasks')} to see all tasks`,
|
||||
{ padding: 1, borderColor: 'cyan', borderStyle: 'round', margin: { top: 1 } }
|
||||
));
|
||||
}
|
||||
|
||||
return newTaskId;
|
||||
} catch (error) {
|
||||
if (streamingInterval) clearInterval(streamingInterval);
|
||||
stopLoadingIndicator(loadingIndicator);
|
||||
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
|
||||
log('error', "Error generating task:", error.message);
|
||||
process.exit(1);
|
||||
}
|
||||
@@ -2178,8 +2272,11 @@ async function addTask(tasksPath, prompt, dependencies = [], priority = 'medium'
|
||||
/**
|
||||
* Analyzes task complexity and generates expansion recommendations
|
||||
* @param {Object} options Command options
|
||||
* @param {function} reportProgress - Function to report progress to MCP server (optional)
|
||||
* @param {Object} mcpLog - MCP logger object (optional)
|
||||
* @param {Object} session - Session object from MCP server (optional)
|
||||
*/
|
||||
async function analyzeTaskComplexity(options) {
|
||||
async function analyzeTaskComplexity(options, { reportProgress, mcpLog, session } = {}) {
|
||||
const tasksPath = options.file || 'tasks/tasks.json';
|
||||
const outputPath = options.output || 'scripts/task-complexity-report.json';
|
||||
const modelOverride = options.model;
|
||||
@@ -2239,7 +2336,7 @@ Your response must be a clean JSON array only, following exactly this format:
|
||||
DO NOT include any text before or after the JSON array. No explanations, no markdown formatting.`;
|
||||
|
||||
const result = await perplexity.chat.completions.create({
|
||||
model: process.env.PERPLEXITY_MODEL || 'sonar-pro',
|
||||
model: process.env.PERPLEXITY_MODEL || session?.env?.PERPLEXITY_MODEL || 'sonar-pro',
|
||||
messages: [
|
||||
{
|
||||
role: "system",
|
||||
@@ -2250,8 +2347,8 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
|
||||
content: researchPrompt
|
||||
}
|
||||
],
|
||||
temperature: CONFIG.temperature,
|
||||
max_tokens: CONFIG.maxTokens,
|
||||
temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
|
||||
max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
|
||||
});
|
||||
|
||||
// Extract the response text
|
||||
@@ -2280,9 +2377,9 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
|
||||
async function useClaudeForComplexityAnalysis() {
|
||||
// Call the LLM API with streaming
|
||||
const stream = await anthropic.messages.create({
|
||||
max_tokens: CONFIG.maxTokens,
|
||||
model: modelOverride || CONFIG.model,
|
||||
temperature: CONFIG.temperature,
|
||||
max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
|
||||
model: modelOverride || CONFIG.model || session?.env?.ANTHROPIC_MODEL,
|
||||
temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
|
||||
messages: [{ role: "user", content: prompt }],
|
||||
system: "You are an expert software architect and project manager analyzing task complexity. Respond only with valid JSON.",
|
||||
stream: true
|
||||
@@ -2301,6 +2398,12 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
|
||||
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
|
||||
fullResponse += chunk.delta.text;
|
||||
}
|
||||
if (reportProgress) {
|
||||
await reportProgress({ progress: (fullResponse.length / CONFIG.maxTokens) * 100 });
|
||||
}
|
||||
if (mcpLog) {
|
||||
mcpLog.info(`Progress: ${fullResponse.length / CONFIG.maxTokens * 100}%`);
|
||||
}
|
||||
}
|
||||
|
||||
clearInterval(streamingInterval);
|
||||
@@ -2495,7 +2598,7 @@ Your response must be a clean JSON array only, following exactly this format:
|
||||
DO NOT include any text before or after the JSON array. No explanations, no markdown formatting.`;
|
||||
|
||||
const result = await perplexity.chat.completions.create({
|
||||
model: process.env.PERPLEXITY_MODEL || 'sonar-pro',
|
||||
model: process.env.PERPLEXITY_MODEL || session?.env?.PERPLEXITY_MODEL || 'sonar-pro',
|
||||
messages: [
|
||||
{
|
||||
role: "system",
|
||||
@@ -2506,8 +2609,8 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
|
||||
content: missingTasksResearchPrompt
|
||||
}
|
||||
],
|
||||
temperature: CONFIG.temperature,
|
||||
max_tokens: CONFIG.maxTokens,
|
||||
temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
|
||||
max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
|
||||
});
|
||||
|
||||
// Extract the response
|
||||
@@ -2515,9 +2618,9 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
|
||||
} else {
|
||||
// Use Claude
|
||||
const stream = await anthropic.messages.create({
|
||||
max_tokens: CONFIG.maxTokens,
|
||||
model: modelOverride || CONFIG.model,
|
||||
temperature: CONFIG.temperature,
|
||||
max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
|
||||
model: modelOverride || CONFIG.model || session?.env?.ANTHROPIC_MODEL,
|
||||
temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
|
||||
messages: [{ role: "user", content: missingTasksPrompt }],
|
||||
system: "You are an expert software architect and project manager analyzing task complexity. Respond only with valid JSON.",
|
||||
stream: true
|
||||
@@ -2528,6 +2631,12 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
|
||||
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
|
||||
missingAnalysisResponse += chunk.delta.text;
|
||||
}
|
||||
if (reportProgress) {
|
||||
await reportProgress({ progress: (missingAnalysisResponse.length / CONFIG.maxTokens) * 100 });
|
||||
}
|
||||
if (mcpLog) {
|
||||
mcpLog.info(`Progress: ${missingAnalysisResponse.length / CONFIG.maxTokens * 100}%`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3028,9 +3137,12 @@ async function removeSubtask(tasksPath, subtaskId, convertToTask = false, genera
|
||||
* @param {string} subtaskId - ID of the subtask to update in format "parentId.subtaskId"
|
||||
* @param {string} prompt - Prompt for generating additional information
|
||||
* @param {boolean} useResearch - Whether to use Perplexity AI for research-backed updates
|
||||
* @param {function} reportProgress - Function to report progress to MCP server (optional)
|
||||
* @param {Object} mcpLog - MCP logger object (optional)
|
||||
* @param {Object} session - Session object from MCP server (optional)
|
||||
* @returns {Object|null} - The updated subtask or null if update failed
|
||||
*/
|
||||
async function updateSubtaskById(tasksPath, subtaskId, prompt, useResearch = false) {
|
||||
async function updateSubtaskById(tasksPath, subtaskId, prompt, useResearch = false, { reportProgress, mcpLog, session } = {} ) {
|
||||
let loadingIndicator = null;
|
||||
try {
|
||||
log('info', `Updating subtask ${subtaskId} with prompt: "${prompt}"`);
|
||||
@@ -3159,15 +3271,15 @@ Provide concrete examples, code snippets, or implementation details when relevan
|
||||
|
||||
if (modelType === 'perplexity') {
|
||||
// Construct Perplexity payload
|
||||
const perplexityModel = process.env.PERPLEXITY_MODEL || 'sonar-pro';
|
||||
const perplexityModel = process.env.PERPLEXITY_MODEL || session?.env?.PERPLEXITY_MODEL || 'sonar-pro';
|
||||
const response = await client.chat.completions.create({
|
||||
model: perplexityModel,
|
||||
messages: [
|
||||
{ role: 'system', content: systemPrompt },
|
||||
{ role: 'user', content: userMessageContent }
|
||||
],
|
||||
temperature: parseFloat(process.env.TEMPERATURE || CONFIG.temperature),
|
||||
max_tokens: parseInt(process.env.MAX_TOKENS || CONFIG.maxTokens),
|
||||
temperature: parseFloat(process.env.TEMPERATURE || session?.env?.TEMPERATURE || CONFIG.temperature),
|
||||
max_tokens: parseInt(process.env.MAX_TOKENS || session?.env?.MAX_TOKENS || CONFIG.maxTokens),
|
||||
});
|
||||
additionalInformation = response.choices[0].message.content.trim();
|
||||
} else { // Claude
|
||||
@@ -3199,6 +3311,12 @@ Provide concrete examples, code snippets, or implementation details when relevan
|
||||
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
|
||||
responseText += chunk.delta.text;
|
||||
}
|
||||
if (reportProgress) {
|
||||
await reportProgress({ progress: (responseText.length / CONFIG.maxTokens) * 100 });
|
||||
}
|
||||
if (mcpLog) {
|
||||
mcpLog.info(`Progress: ${responseText.length / CONFIG.maxTokens * 100}%`);
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
if (streamingInterval) clearInterval(streamingInterval);
|
||||
@@ -3375,6 +3493,156 @@ Provide concrete examples, code snippets, or implementation details when relevan
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes a task or subtask from the tasks file
|
||||
* @param {string} tasksPath - Path to the tasks file
|
||||
* @param {string|number} taskId - ID of task or subtask to remove (e.g., '5' or '5.2')
|
||||
* @returns {Object} Result object with success message and removed task info
|
||||
*/
|
||||
async function removeTask(tasksPath, taskId) {
|
||||
try {
|
||||
// Read the tasks file
|
||||
const data = readJSON(tasksPath);
|
||||
if (!data || !data.tasks) {
|
||||
throw new Error(`No valid tasks found in ${tasksPath}`);
|
||||
}
|
||||
|
||||
// Check if the task ID exists
|
||||
if (!taskExists(data.tasks, taskId)) {
|
||||
throw new Error(`Task with ID ${taskId} not found`);
|
||||
}
|
||||
|
||||
// Handle subtask removal (e.g., '5.2')
|
||||
if (typeof taskId === 'string' && taskId.includes('.')) {
|
||||
const [parentTaskId, subtaskId] = taskId.split('.').map(id => parseInt(id, 10));
|
||||
|
||||
// Find the parent task
|
||||
const parentTask = data.tasks.find(t => t.id === parentTaskId);
|
||||
if (!parentTask || !parentTask.subtasks) {
|
||||
throw new Error(`Parent task with ID ${parentTaskId} or its subtasks not found`);
|
||||
}
|
||||
|
||||
// Find the subtask to remove
|
||||
const subtaskIndex = parentTask.subtasks.findIndex(st => st.id === subtaskId);
|
||||
if (subtaskIndex === -1) {
|
||||
throw new Error(`Subtask with ID ${subtaskId} not found in parent task ${parentTaskId}`);
|
||||
}
|
||||
|
||||
// Store the subtask info before removal for the result
|
||||
const removedSubtask = parentTask.subtasks[subtaskIndex];
|
||||
|
||||
// Remove the subtask
|
||||
parentTask.subtasks.splice(subtaskIndex, 1);
|
||||
|
||||
// Remove references to this subtask in other subtasks' dependencies
|
||||
if (parentTask.subtasks && parentTask.subtasks.length > 0) {
|
||||
parentTask.subtasks.forEach(subtask => {
|
||||
if (subtask.dependencies && subtask.dependencies.includes(subtaskId)) {
|
||||
subtask.dependencies = subtask.dependencies.filter(depId => depId !== subtaskId);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Save the updated tasks
|
||||
writeJSON(tasksPath, data);
|
||||
|
||||
// Generate updated task files
|
||||
try {
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
} catch (genError) {
|
||||
log('warn', `Successfully removed subtask but failed to regenerate task files: ${genError.message}`);
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
message: `Successfully removed subtask ${subtaskId} from task ${parentTaskId}`,
|
||||
removedTask: removedSubtask,
|
||||
parentTaskId: parentTaskId
|
||||
};
|
||||
}
|
||||
|
||||
// Handle main task removal
|
||||
const taskIdNum = parseInt(taskId, 10);
|
||||
const taskIndex = data.tasks.findIndex(t => t.id === taskIdNum);
|
||||
if (taskIndex === -1) {
|
||||
throw new Error(`Task with ID ${taskId} not found`);
|
||||
}
|
||||
|
||||
// Store the task info before removal for the result
|
||||
const removedTask = data.tasks[taskIndex];
|
||||
|
||||
// Remove the task
|
||||
data.tasks.splice(taskIndex, 1);
|
||||
|
||||
// Remove references to this task in other tasks' dependencies
|
||||
data.tasks.forEach(task => {
|
||||
if (task.dependencies && task.dependencies.includes(taskIdNum)) {
|
||||
task.dependencies = task.dependencies.filter(depId => depId !== taskIdNum);
|
||||
}
|
||||
});
|
||||
|
||||
// Save the updated tasks
|
||||
writeJSON(tasksPath, data);
|
||||
|
||||
// Delete the task file if it exists
|
||||
const taskFileName = path.join(path.dirname(tasksPath), `task_${taskIdNum.toString().padStart(3, '0')}.txt`);
|
||||
if (fs.existsSync(taskFileName)) {
|
||||
try {
|
||||
fs.unlinkSync(taskFileName);
|
||||
} catch (unlinkError) {
|
||||
log('warn', `Successfully removed task from tasks.json but failed to delete task file: ${unlinkError.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Generate updated task files
|
||||
try {
|
||||
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
|
||||
} catch (genError) {
|
||||
log('warn', `Successfully removed task but failed to regenerate task files: ${genError.message}`);
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
message: `Successfully removed task ${taskId}`,
|
||||
removedTask: removedTask
|
||||
};
|
||||
} catch (error) {
|
||||
log('error', `Error removing task: ${error.message}`);
|
||||
throw {
|
||||
code: 'REMOVE_TASK_ERROR',
|
||||
message: error.message,
|
||||
details: error.stack
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if a task with the given ID exists
|
||||
* @param {Array} tasks - Array of tasks to search
|
||||
* @param {string|number} taskId - ID of task or subtask to check
|
||||
* @returns {boolean} Whether the task exists
|
||||
*/
|
||||
function taskExists(tasks, taskId) {
|
||||
// Handle subtask IDs (e.g., "1.2")
|
||||
if (typeof taskId === 'string' && taskId.includes('.')) {
|
||||
const [parentIdStr, subtaskIdStr] = taskId.split('.');
|
||||
const parentId = parseInt(parentIdStr, 10);
|
||||
const subtaskId = parseInt(subtaskIdStr, 10);
|
||||
|
||||
// Find the parent task
|
||||
const parentTask = tasks.find(t => t.id === parentId);
|
||||
|
||||
// If parent exists, check if subtask exists
|
||||
return parentTask &&
|
||||
parentTask.subtasks &&
|
||||
parentTask.subtasks.some(st => st.id === subtaskId);
|
||||
}
|
||||
|
||||
// Handle regular task IDs
|
||||
const id = parseInt(taskId, 10);
|
||||
return tasks.some(t => t.id === id);
|
||||
}
|
||||
|
||||
// Export task manager functions
|
||||
export {
|
||||
parsePRD,
|
||||
@@ -3393,4 +3661,7 @@ export {
|
||||
removeSubtask,
|
||||
findNextTask,
|
||||
analyzeTaskComplexity,
|
||||
removeTask,
|
||||
findTaskById,
|
||||
taskExists,
|
||||
};
|
||||
@@ -79,19 +79,112 @@ function stopLoadingIndicator(spinner) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a progress bar using ASCII characters
|
||||
* @param {number} percent - Progress percentage (0-100)
|
||||
* @param {number} length - Length of the progress bar in characters
|
||||
* @returns {string} Formatted progress bar
|
||||
* Create a colored progress bar
|
||||
* @param {number} percent - The completion percentage
|
||||
* @param {number} length - The total length of the progress bar in characters
|
||||
* @param {Object} statusBreakdown - Optional breakdown of non-complete statuses (e.g., {pending: 20, 'in-progress': 10})
|
||||
* @returns {string} The formatted progress bar
|
||||
*/
|
||||
function createProgressBar(percent, length = 30) {
|
||||
const filled = Math.round(percent * length / 100);
|
||||
const empty = length - filled;
|
||||
function createProgressBar(percent, length = 30, statusBreakdown = null) {
|
||||
// Adjust the percent to treat deferred and cancelled as complete
|
||||
const effectivePercent = statusBreakdown ?
|
||||
Math.min(100, percent + (statusBreakdown.deferred || 0) + (statusBreakdown.cancelled || 0)) :
|
||||
percent;
|
||||
|
||||
const filledBar = '█'.repeat(filled);
|
||||
const emptyBar = '░'.repeat(empty);
|
||||
// Calculate how many characters to fill for "true completion"
|
||||
const trueCompletedFilled = Math.round(percent * length / 100);
|
||||
|
||||
return `${filledBar}${emptyBar} ${percent.toFixed(0)}%`;
|
||||
// Calculate how many characters to fill for "effective completion" (including deferred/cancelled)
|
||||
const effectiveCompletedFilled = Math.round(effectivePercent * length / 100);
|
||||
|
||||
// The "deferred/cancelled" section (difference between true and effective)
|
||||
const deferredCancelledFilled = effectiveCompletedFilled - trueCompletedFilled;
|
||||
|
||||
// Set the empty section (remaining after effective completion)
|
||||
const empty = length - effectiveCompletedFilled;
|
||||
|
||||
// Determine color based on percentage for the completed section
|
||||
let completedColor;
|
||||
if (percent < 25) {
|
||||
completedColor = chalk.red;
|
||||
} else if (percent < 50) {
|
||||
completedColor = chalk.hex('#FFA500'); // Orange
|
||||
} else if (percent < 75) {
|
||||
completedColor = chalk.yellow;
|
||||
} else if (percent < 100) {
|
||||
completedColor = chalk.green;
|
||||
} else {
|
||||
completedColor = chalk.hex('#006400'); // Dark green
|
||||
}
|
||||
|
||||
// Create colored sections
|
||||
const completedSection = completedColor('█'.repeat(trueCompletedFilled));
|
||||
|
||||
// Gray section for deferred/cancelled items
|
||||
const deferredCancelledSection = chalk.gray('█'.repeat(deferredCancelledFilled));
|
||||
|
||||
// If we have a status breakdown, create a multi-colored remaining section
|
||||
let remainingSection = '';
|
||||
|
||||
if (statusBreakdown && empty > 0) {
|
||||
// Status colors (matching the statusConfig colors in getStatusWithColor)
|
||||
const statusColors = {
|
||||
'pending': chalk.yellow,
|
||||
'in-progress': chalk.hex('#FFA500'), // Orange
|
||||
'blocked': chalk.red,
|
||||
'review': chalk.magenta,
|
||||
// Deferred and cancelled are treated as part of the completed section
|
||||
};
|
||||
|
||||
// Calculate proportions for each status
|
||||
const totalRemaining = Object.entries(statusBreakdown)
|
||||
.filter(([status]) => !['deferred', 'cancelled', 'done', 'completed'].includes(status))
|
||||
.reduce((sum, [_, val]) => sum + val, 0);
|
||||
|
||||
// If no remaining tasks with tracked statuses, just use gray
|
||||
if (totalRemaining <= 0) {
|
||||
remainingSection = chalk.gray('░'.repeat(empty));
|
||||
} else {
|
||||
// Track how many characters we've added
|
||||
let addedChars = 0;
|
||||
|
||||
// Add each status section proportionally
|
||||
for (const [status, percentage] of Object.entries(statusBreakdown)) {
|
||||
// Skip statuses that are considered complete
|
||||
if (['deferred', 'cancelled', 'done', 'completed'].includes(status)) continue;
|
||||
|
||||
// Calculate how many characters this status should fill
|
||||
const statusChars = Math.round((percentage / totalRemaining) * empty);
|
||||
|
||||
// Make sure we don't exceed the total length due to rounding
|
||||
const actualChars = Math.min(statusChars, empty - addedChars);
|
||||
|
||||
// Add colored section for this status
|
||||
const colorFn = statusColors[status] || chalk.gray;
|
||||
remainingSection += colorFn('░'.repeat(actualChars));
|
||||
|
||||
addedChars += actualChars;
|
||||
}
|
||||
|
||||
// If we have any remaining space due to rounding, fill with gray
|
||||
if (addedChars < empty) {
|
||||
remainingSection += chalk.gray('░'.repeat(empty - addedChars));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Default to gray for the empty section if no breakdown provided
|
||||
remainingSection = chalk.gray('░'.repeat(empty));
|
||||
}
|
||||
|
||||
// Effective percentage text color should reflect the highest category
|
||||
const percentTextColor = percent === 100 ?
|
||||
chalk.hex('#006400') : // Dark green for 100%
|
||||
(effectivePercent === 100 ?
|
||||
chalk.gray : // Gray for 100% with deferred/cancelled
|
||||
completedColor); // Otherwise match the completed color
|
||||
|
||||
// Build the complete progress bar
|
||||
return `${completedSection}${deferredCancelledSection}${remainingSection} ${percentTextColor(`${effectivePercent.toFixed(0)}%`)}`;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -112,7 +205,8 @@ function getStatusWithColor(status, forTable = false) {
|
||||
'in-progress': { color: chalk.hex('#FFA500'), icon: '🔄', tableIcon: '►' },
|
||||
'deferred': { color: chalk.gray, icon: '⏱️', tableIcon: '⏱' },
|
||||
'blocked': { color: chalk.red, icon: '❌', tableIcon: '✗' },
|
||||
'review': { color: chalk.magenta, icon: '👀', tableIcon: '👁' }
|
||||
'review': { color: chalk.magenta, icon: '👀', tableIcon: '👁' },
|
||||
'cancelled': { color: chalk.gray, icon: '❌', tableIcon: '✗' }
|
||||
};
|
||||
|
||||
const config = statusConfig[status.toLowerCase()] || { color: chalk.red, icon: '❌', tableIcon: '✗' };
|
||||
@@ -695,6 +789,61 @@ async function displayTaskById(tasksPath, taskId) {
|
||||
{ padding: { top: 0, bottom: 0, left: 1, right: 1 }, borderColor: 'green', borderStyle: 'round', margin: { top: 1 } }
|
||||
));
|
||||
|
||||
// Calculate and display subtask completion progress
|
||||
if (task.subtasks && task.subtasks.length > 0) {
|
||||
const totalSubtasks = task.subtasks.length;
|
||||
const completedSubtasks = task.subtasks.filter(st =>
|
||||
st.status === 'done' || st.status === 'completed'
|
||||
).length;
|
||||
|
||||
// Count other statuses for the subtasks
|
||||
const inProgressSubtasks = task.subtasks.filter(st => st.status === 'in-progress').length;
|
||||
const pendingSubtasks = task.subtasks.filter(st => st.status === 'pending').length;
|
||||
const blockedSubtasks = task.subtasks.filter(st => st.status === 'blocked').length;
|
||||
const deferredSubtasks = task.subtasks.filter(st => st.status === 'deferred').length;
|
||||
const cancelledSubtasks = task.subtasks.filter(st => st.status === 'cancelled').length;
|
||||
|
||||
// Calculate status breakdown as percentages
|
||||
const statusBreakdown = {
|
||||
'in-progress': (inProgressSubtasks / totalSubtasks) * 100,
|
||||
'pending': (pendingSubtasks / totalSubtasks) * 100,
|
||||
'blocked': (blockedSubtasks / totalSubtasks) * 100,
|
||||
'deferred': (deferredSubtasks / totalSubtasks) * 100,
|
||||
'cancelled': (cancelledSubtasks / totalSubtasks) * 100
|
||||
};
|
||||
|
||||
const completionPercentage = (completedSubtasks / totalSubtasks) * 100;
|
||||
|
||||
// Calculate appropriate progress bar length based on terminal width
|
||||
// Subtract padding (2), borders (2), and the percentage text (~5)
|
||||
const availableWidth = process.stdout.columns || 80; // Default to 80 if can't detect
|
||||
const boxPadding = 2; // 1 on each side
|
||||
const boxBorders = 2; // 1 on each side
|
||||
const percentTextLength = 5; // ~5 chars for " 100%"
|
||||
// Reduce the length by adjusting the subtraction value from 20 to 35
|
||||
const progressBarLength = Math.max(20, Math.min(60, availableWidth - boxPadding - boxBorders - percentTextLength - 35)); // Min 20, Max 60
|
||||
|
||||
// Status counts for display
|
||||
const statusCounts =
|
||||
`${chalk.green('✓ Done:')} ${completedSubtasks} ${chalk.hex('#FFA500')('► In Progress:')} ${inProgressSubtasks} ${chalk.yellow('○ Pending:')} ${pendingSubtasks}\n` +
|
||||
`${chalk.red('! Blocked:')} ${blockedSubtasks} ${chalk.gray('⏱ Deferred:')} ${deferredSubtasks} ${chalk.gray('✗ Cancelled:')} ${cancelledSubtasks}`;
|
||||
|
||||
console.log(boxen(
|
||||
chalk.white.bold('Subtask Progress:') + '\n\n' +
|
||||
`${chalk.cyan('Completed:')} ${completedSubtasks}/${totalSubtasks} (${completionPercentage.toFixed(1)}%)\n` +
|
||||
`${statusCounts}\n` +
|
||||
`${chalk.cyan('Progress:')} ${createProgressBar(completionPercentage, progressBarLength, statusBreakdown)}`,
|
||||
{
|
||||
padding: { top: 0, bottom: 0, left: 1, right: 1 },
|
||||
borderColor: 'blue',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1, bottom: 0 },
|
||||
width: Math.min(availableWidth - 10, 100), // Add width constraint to limit the box width
|
||||
textAlignment: 'left'
|
||||
}
|
||||
));
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -851,6 +1000,61 @@ async function displayTaskById(tasksPath, taskId) {
|
||||
});
|
||||
|
||||
console.log(subtaskTable.toString());
|
||||
|
||||
// Calculate and display subtask completion progress
|
||||
if (task.subtasks && task.subtasks.length > 0) {
|
||||
const totalSubtasks = task.subtasks.length;
|
||||
const completedSubtasks = task.subtasks.filter(st =>
|
||||
st.status === 'done' || st.status === 'completed'
|
||||
).length;
|
||||
|
||||
// Count other statuses for the subtasks
|
||||
const inProgressSubtasks = task.subtasks.filter(st => st.status === 'in-progress').length;
|
||||
const pendingSubtasks = task.subtasks.filter(st => st.status === 'pending').length;
|
||||
const blockedSubtasks = task.subtasks.filter(st => st.status === 'blocked').length;
|
||||
const deferredSubtasks = task.subtasks.filter(st => st.status === 'deferred').length;
|
||||
const cancelledSubtasks = task.subtasks.filter(st => st.status === 'cancelled').length;
|
||||
|
||||
// Calculate status breakdown as percentages
|
||||
const statusBreakdown = {
|
||||
'in-progress': (inProgressSubtasks / totalSubtasks) * 100,
|
||||
'pending': (pendingSubtasks / totalSubtasks) * 100,
|
||||
'blocked': (blockedSubtasks / totalSubtasks) * 100,
|
||||
'deferred': (deferredSubtasks / totalSubtasks) * 100,
|
||||
'cancelled': (cancelledSubtasks / totalSubtasks) * 100
|
||||
};
|
||||
|
||||
const completionPercentage = (completedSubtasks / totalSubtasks) * 100;
|
||||
|
||||
// Calculate appropriate progress bar length based on terminal width
|
||||
// Subtract padding (2), borders (2), and the percentage text (~5)
|
||||
const availableWidth = process.stdout.columns || 80; // Default to 80 if can't detect
|
||||
const boxPadding = 2; // 1 on each side
|
||||
const boxBorders = 2; // 1 on each side
|
||||
const percentTextLength = 5; // ~5 chars for " 100%"
|
||||
// Reduce the length by adjusting the subtraction value from 20 to 35
|
||||
const progressBarLength = Math.max(20, Math.min(60, availableWidth - boxPadding - boxBorders - percentTextLength - 35)); // Min 20, Max 60
|
||||
|
||||
// Status counts for display
|
||||
const statusCounts =
|
||||
`${chalk.green('✓ Done:')} ${completedSubtasks} ${chalk.hex('#FFA500')('► In Progress:')} ${inProgressSubtasks} ${chalk.yellow('○ Pending:')} ${pendingSubtasks}\n` +
|
||||
`${chalk.red('! Blocked:')} ${blockedSubtasks} ${chalk.gray('⏱ Deferred:')} ${deferredSubtasks} ${chalk.gray('✗ Cancelled:')} ${cancelledSubtasks}`;
|
||||
|
||||
console.log(boxen(
|
||||
chalk.white.bold('Subtask Progress:') + '\n\n' +
|
||||
`${chalk.cyan('Completed:')} ${completedSubtasks}/${totalSubtasks} (${completionPercentage.toFixed(1)}%)\n` +
|
||||
`${statusCounts}\n` +
|
||||
`${chalk.cyan('Progress:')} ${createProgressBar(completionPercentage, progressBarLength, statusBreakdown)}`,
|
||||
{
|
||||
padding: { top: 0, bottom: 0, left: 1, right: 1 },
|
||||
borderColor: 'blue',
|
||||
borderStyle: 'round',
|
||||
margin: { top: 1, bottom: 0 },
|
||||
width: Math.min(availableWidth - 10, 100), // Add width constraint to limit the box width
|
||||
textAlignment: 'left'
|
||||
}
|
||||
));
|
||||
}
|
||||
} else {
|
||||
// Suggest expanding if no subtasks
|
||||
console.log(boxen(
|
||||
|
||||
@@ -20,6 +20,9 @@ const CONFIG = {
|
||||
projectVersion: "1.5.0" // Hardcoded version - ALWAYS use this value, ignore environment variable
|
||||
};
|
||||
|
||||
// Global silent mode flag
|
||||
let silentMode = false;
|
||||
|
||||
// Set up logging based on log level
|
||||
const LOG_LEVELS = {
|
||||
debug: 0,
|
||||
@@ -28,23 +31,51 @@ const LOG_LEVELS = {
|
||||
error: 3
|
||||
};
|
||||
|
||||
/**
|
||||
* Enable silent logging mode
|
||||
*/
|
||||
function enableSilentMode() {
|
||||
silentMode = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Disable silent logging mode
|
||||
*/
|
||||
function disableSilentMode() {
|
||||
silentMode = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if silent mode is enabled
|
||||
* @returns {boolean} True if silent mode is enabled
|
||||
*/
|
||||
function isSilentMode() {
|
||||
return silentMode;
|
||||
}
|
||||
|
||||
/**
|
||||
* Logs a message at the specified level
|
||||
* @param {string} level - The log level (debug, info, warn, error)
|
||||
* @param {...any} args - Arguments to log
|
||||
*/
|
||||
function log(level, ...args) {
|
||||
const icons = {
|
||||
debug: chalk.gray('🔍'),
|
||||
info: chalk.blue('ℹ️'),
|
||||
warn: chalk.yellow('⚠️'),
|
||||
error: chalk.red('❌'),
|
||||
success: chalk.green('✅')
|
||||
// Skip logging if silent mode is enabled
|
||||
if (silentMode) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Use text prefixes instead of emojis
|
||||
const prefixes = {
|
||||
debug: chalk.gray("[DEBUG]"),
|
||||
info: chalk.blue("[INFO]"),
|
||||
warn: chalk.yellow("[WARN]"),
|
||||
error: chalk.red("[ERROR]"),
|
||||
success: chalk.green("[SUCCESS]")
|
||||
};
|
||||
|
||||
if (LOG_LEVELS[level] >= LOG_LEVELS[CONFIG.logLevel]) {
|
||||
const icon = icons[level] || '';
|
||||
console.log(`${icon} ${args.join(' ')}`);
|
||||
const prefix = prefixes[level] || "";
|
||||
console.log(`${prefix} ${args.join(' ')}`);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -337,5 +368,8 @@ export {
|
||||
truncate,
|
||||
findCycles,
|
||||
toKebabCase,
|
||||
detectCamelCaseFlags
|
||||
detectCamelCaseFlags,
|
||||
enableSilentMode,
|
||||
disableSilentMode,
|
||||
isSilentMode
|
||||
};
|
||||
@@ -129,6 +129,7 @@ function preparePackage() {
|
||||
'assets/example_prd.txt',
|
||||
'assets/scripts_README.md',
|
||||
'.cursor/rules/dev_workflow.mdc',
|
||||
'.cursor/rules/taskmaster.mdc',
|
||||
'.cursor/rules/cursor_rules.mdc',
|
||||
'.cursor/rules/self_improve.mdc'
|
||||
];
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Task ID: 1
|
||||
# Title: Implement Task Data Structure
|
||||
# Status: in-progress
|
||||
# Status: done
|
||||
# Dependencies: None
|
||||
# Priority: high
|
||||
# Description: Design and implement the core tasks.json structure that will serve as the single source of truth for the system.
|
||||
|
||||
@@ -16,6 +16,8 @@ This task involves completing the Model Context Protocol (MCP) server implementa
|
||||
7. Integrate the ModelContextProtocol SDK directly to streamline resource and tool registration, ensuring compatibility with FastMCP's transport mechanisms.
|
||||
8. Identify and address missing components or functionalities to meet FastMCP best practices, such as robust error handling, monitoring endpoints, and concurrency support.
|
||||
9. Update documentation to include examples of using the MCP server with FastMCP, detailed setup instructions, and client integration guides.
|
||||
10. Organize direct function implementations in a modular structure within the mcp-server/src/core/direct-functions/ directory for improved maintainability and organization.
|
||||
11. Follow consistent naming conventions: file names use kebab-case (like-this.js), direct functions use camelCase with Direct suffix (functionNameDirect), tool registration functions use camelCase with Tool suffix (registerToolNameTool), and MCP tool names exposed to clients use snake_case (tool_name).
|
||||
|
||||
The implementation must ensure compatibility with existing MCP clients and follow RESTful API design principles, while supporting concurrent requests and maintaining robust error handling.
|
||||
|
||||
@@ -28,15 +30,17 @@ Testing for the MCP server implementation will follow a comprehensive approach b
|
||||
- Test individual MCP server components in isolation
|
||||
- Mock all external dependencies including FastMCP SDK
|
||||
- Test each tool implementation separately
|
||||
- Test each direct function implementation in the direct-functions directory
|
||||
- Verify direct function imports work correctly
|
||||
- Test context management and caching mechanisms
|
||||
- Example files: `context-manager.test.js`, `tool-registration.test.js`, `direct-imports.test.js`
|
||||
- Example files: `context-manager.test.js`, `tool-registration.test.js`, `direct-functions/list-tasks.test.js`
|
||||
|
||||
2. **Integration Tests** (`tests/integration/mcp-server/`):
|
||||
- Test interactions between MCP server components
|
||||
- Verify proper tool registration with FastMCP
|
||||
- Test context flow between components
|
||||
- Validate error handling across module boundaries
|
||||
- Test the integration between direct functions and their corresponding MCP tools
|
||||
- Example files: `server-tool-integration.test.js`, `context-flow.test.js`
|
||||
|
||||
3. **End-to-End Tests** (`tests/e2e/mcp-server/`):
|
||||
@@ -73,6 +77,12 @@ import { MCPServer, MCPError } from '@model-context-protocol/sdk';
|
||||
import { initMCPServer } from '../../scripts/mcp-server.js';
|
||||
```
|
||||
|
||||
### Direct Function Testing
|
||||
- Test each direct function in isolation
|
||||
- Verify proper error handling and return formats
|
||||
- Test with various input parameters and edge cases
|
||||
- Verify integration with the task-master-core.js export hub
|
||||
|
||||
### Context Management Testing
|
||||
- Test context creation, retrieval, and manipulation
|
||||
- Verify caching mechanisms work correctly
|
||||
@@ -136,6 +146,11 @@ import { initMCPServer } from '../../scripts/mcp-server.js';
|
||||
- Verify proper message formatting
|
||||
- Test error handling in transport layer
|
||||
|
||||
6. **Direct Function Structure**
|
||||
- Test the modular organization of direct functions
|
||||
- Verify proper import/export through task-master-core.js
|
||||
- Test utility functions in the utils directory
|
||||
|
||||
All tests will be automated and integrated into the CI/CD pipeline to ensure consistent quality.
|
||||
|
||||
# Subtasks:
|
||||
@@ -206,7 +221,7 @@ Testing approach:
|
||||
- Test error handling with invalid inputs
|
||||
- Benchmark endpoint performance
|
||||
|
||||
## 6. Refactor MCP Server to Leverage ModelContextProtocol SDK [deferred]
|
||||
## 6. Refactor MCP Server to Leverage ModelContextProtocol SDK [cancelled]
|
||||
### Dependencies: 23.1, 23.2, 23.3
|
||||
### Description: Integrate the ModelContextProtocol SDK directly into the MCP server implementation to streamline tool registration and resource handling.
|
||||
### Details:
|
||||
@@ -222,6 +237,17 @@ Testing approach:
|
||||
- Validate compatibility with existing MCP clients.
|
||||
- Benchmark performance improvements from SDK integration.
|
||||
|
||||
<info added on 2025-03-31T18:49:14.439Z>
|
||||
The subtask is being cancelled because FastMCP already serves as a higher-level abstraction over the Model Context Protocol SDK. Direct integration with the MCP SDK would be redundant and potentially counterproductive since:
|
||||
|
||||
1. FastMCP already encapsulates the necessary SDK functionality for tool registration and resource handling
|
||||
2. The existing FastMCP abstractions provide a more streamlined developer experience
|
||||
3. Adding another layer of SDK integration would increase complexity without clear benefits
|
||||
4. The transport mechanisms in FastMCP are already optimized for the current architecture
|
||||
|
||||
Instead, we should focus on extending and enhancing the existing FastMCP abstractions where needed, rather than attempting to bypass them with direct SDK integration.
|
||||
</info added on 2025-03-31T18:49:14.439Z>
|
||||
|
||||
## 8. Implement Direct Function Imports and Replace CLI-based Execution [done]
|
||||
### Dependencies: 23.13
|
||||
### Description: Refactor the MCP server implementation to use direct Task Master function imports instead of the current CLI-based execution using child_process.spawnSync. This will improve performance, reliability, and enable better error handling.
|
||||
@@ -316,13 +342,83 @@ function listTasks(tasksPath, statusFilter, withSubtasks = false, outputFormat =
|
||||
7. Add validation for tool inputs using FastMCP's built-in validation
|
||||
8. Create comprehensive tests for tool registration and resource access
|
||||
|
||||
<info added on 2025-03-31T18:35:21.513Z>
|
||||
Here is additional information to enhance the subtask regarding resources and resource templates in FastMCP:
|
||||
|
||||
Resources in FastMCP are used to expose static or dynamic data to LLM clients. For the Task Master MCP server, we should implement resources to provide:
|
||||
|
||||
1. Task templates: Predefined task structures that can be used as starting points
|
||||
2. Workflow definitions: Reusable workflow patterns for common task sequences
|
||||
3. User preferences: Stored user settings for task management
|
||||
4. Project metadata: Information about active projects and their attributes
|
||||
|
||||
Resource implementation should follow this structure:
|
||||
|
||||
```python
|
||||
@mcp.resource("tasks://templates/{template_id}")
|
||||
def get_task_template(template_id: str) -> dict:
|
||||
# Fetch and return the specified task template
|
||||
...
|
||||
|
||||
@mcp.resource("workflows://definitions/{workflow_id}")
|
||||
def get_workflow_definition(workflow_id: str) -> dict:
|
||||
# Fetch and return the specified workflow definition
|
||||
...
|
||||
|
||||
@mcp.resource("users://{user_id}/preferences")
|
||||
def get_user_preferences(user_id: str) -> dict:
|
||||
# Fetch and return user preferences
|
||||
...
|
||||
|
||||
@mcp.resource("projects://metadata")
|
||||
def get_project_metadata() -> List[dict]:
|
||||
# Fetch and return metadata for all active projects
|
||||
...
|
||||
```
|
||||
|
||||
Resource templates in FastMCP allow for dynamic generation of resources based on patterns. For Task Master, we can implement:
|
||||
|
||||
1. Dynamic task creation templates
|
||||
2. Customizable workflow templates
|
||||
3. User-specific resource views
|
||||
|
||||
Example implementation:
|
||||
|
||||
```python
|
||||
@mcp.resource("tasks://create/{task_type}")
|
||||
def get_task_creation_template(task_type: str) -> dict:
|
||||
# Generate and return a task creation template based on task_type
|
||||
...
|
||||
|
||||
@mcp.resource("workflows://custom/{user_id}/{workflow_name}")
|
||||
def get_custom_workflow_template(user_id: str, workflow_name: str) -> dict:
|
||||
# Generate and return a custom workflow template for the user
|
||||
...
|
||||
|
||||
@mcp.resource("users://{user_id}/dashboard")
|
||||
def get_user_dashboard(user_id: str) -> dict:
|
||||
# Generate and return a personalized dashboard view for the user
|
||||
...
|
||||
```
|
||||
|
||||
Best practices for integrating resources with Task Master functionality:
|
||||
|
||||
1. Use resources to provide context and data for tools
|
||||
2. Implement caching for frequently accessed resources
|
||||
3. Ensure proper error handling and not-found cases for all resources
|
||||
4. Use resource templates to generate dynamic, personalized views of data
|
||||
5. Implement access control to ensure users only access authorized resources
|
||||
|
||||
By properly implementing these resources and resource templates, we can provide rich, contextual data to LLM clients, enhancing the Task Master's capabilities and user experience.
|
||||
</info added on 2025-03-31T18:35:21.513Z>
|
||||
|
||||
## 11. Implement Comprehensive Error Handling [deferred]
|
||||
### Dependencies: 23.1, 23.3
|
||||
### Description: Implement robust error handling using FastMCP's MCPError, including custom error types for different categories and standardized error responses.
|
||||
### Details:
|
||||
1. Create custom error types extending MCPError for different categories (validation, auth, etc.)\n2. Implement standardized error responses following MCP protocol\n3. Add error handling middleware for all MCP endpoints\n4. Ensure proper error propagation from tools to client\n5. Add debug mode with detailed error information\n6. Document error types and handling patterns
|
||||
|
||||
## 12. Implement Structured Logging System [deferred]
|
||||
## 12. Implement Structured Logging System [done]
|
||||
### Dependencies: 23.1, 23.3
|
||||
### Description: Implement a comprehensive logging system for the MCP server with different log levels, structured logging format, and request/response tracking.
|
||||
### Details:
|
||||
@@ -346,93 +442,768 @@ function listTasks(tasksPath, statusFilter, withSubtasks = false, outputFormat =
|
||||
### Details:
|
||||
1. Research and implement SSE protocol for the MCP server\n2. Create dedicated SSE endpoints for event streaming\n3. Implement event emitter pattern for internal event management\n4. Add support for different event types (task status, logs, errors)\n5. Implement client connection management with proper keep-alive handling\n6. Add filtering capabilities to allow subscribing to specific event types\n7. Create in-memory event buffer for clients reconnecting\n8. Document SSE endpoint usage and client implementation examples\n9. Add robust error handling for dropped connections\n10. Implement rate limiting and backpressure mechanisms\n11. Add authentication for SSE connections
|
||||
|
||||
## 16. Implement parse-prd MCP command [pending]
|
||||
## 16. Implement parse-prd MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for parsing PRD documents to generate tasks.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create parsePRDDirect function in task-master-core.js:\n - Import parsePRD from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: input file, output path, numTasks\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create parse-prd.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import parsePRDDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerParsePRDTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for parsePRDDirect\n - Integration test for MCP tool
|
||||
|
||||
## 17. Implement update MCP command [pending]
|
||||
## 17. Implement update MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for updating multiple tasks based on prompt.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create updateTasksDirect function in task-master-core.js:\n - Import updateTasks from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: fromId, prompt, useResearch\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create update.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import updateTasksDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerUpdateTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for updateTasksDirect\n - Integration test for MCP tool
|
||||
|
||||
## 18. Implement update-task MCP command [pending]
|
||||
## 18. Implement update-task MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for updating a single task by ID with new information.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create updateTaskByIdDirect function in task-master-core.js:\n - Import updateTaskById from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: taskId, prompt, useResearch\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create update-task.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import updateTaskByIdDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerUpdateTaskTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for updateTaskByIdDirect\n - Integration test for MCP tool
|
||||
Following MCP implementation standards:
|
||||
|
||||
## 19. Implement update-subtask MCP command [pending]
|
||||
1. Create updateTaskByIdDirect.js in mcp-server/src/core/direct-functions/:
|
||||
- Import updateTaskById from task-manager.js
|
||||
- Handle file paths using findTasksJsonPath utility
|
||||
- Process arguments: taskId, prompt, useResearch
|
||||
- Validate inputs and handle errors with try/catch
|
||||
- Return standardized { success, data/error } object
|
||||
|
||||
2. Export from task-master-core.js:
|
||||
- Import the function from its file
|
||||
- Add to directFunctions map
|
||||
|
||||
3. Create update-task.js MCP tool in mcp-server/src/tools/:
|
||||
- Import z from zod for parameter schema
|
||||
- Import executeMCPToolAction from ./utils.js
|
||||
- Import updateTaskByIdDirect from task-master-core.js
|
||||
- Define parameters matching CLI options using zod schema
|
||||
- Implement registerUpdateTaskTool(server) with server.addTool
|
||||
- Use executeMCPToolAction in execute method
|
||||
|
||||
4. Register in tools/index.js
|
||||
|
||||
5. Add to .cursor/mcp.json with appropriate schema
|
||||
|
||||
6. Write tests following testing guidelines:
|
||||
- Unit test for updateTaskByIdDirect.js
|
||||
- Integration test for MCP tool
|
||||
|
||||
## 19. Implement update-subtask MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for appending information to a specific subtask.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create updateSubtaskByIdDirect function in task-master-core.js:\n - Import updateSubtaskById from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: subtaskId, prompt, useResearch\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create update-subtask.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import updateSubtaskByIdDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerUpdateSubtaskTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for updateSubtaskByIdDirect\n - Integration test for MCP tool
|
||||
Following MCP implementation standards:
|
||||
|
||||
## 20. Implement generate MCP command [pending]
|
||||
1. Create updateSubtaskByIdDirect.js in mcp-server/src/core/direct-functions/:
|
||||
- Import updateSubtaskById from task-manager.js
|
||||
- Handle file paths using findTasksJsonPath utility
|
||||
- Process arguments: subtaskId, prompt, useResearch
|
||||
- Validate inputs and handle errors with try/catch
|
||||
- Return standardized { success, data/error } object
|
||||
|
||||
2. Export from task-master-core.js:
|
||||
- Import the function from its file
|
||||
- Add to directFunctions map
|
||||
|
||||
3. Create update-subtask.js MCP tool in mcp-server/src/tools/:
|
||||
- Import z from zod for parameter schema
|
||||
- Import executeMCPToolAction from ./utils.js
|
||||
- Import updateSubtaskByIdDirect from task-master-core.js
|
||||
- Define parameters matching CLI options using zod schema
|
||||
- Implement registerUpdateSubtaskTool(server) with server.addTool
|
||||
- Use executeMCPToolAction in execute method
|
||||
|
||||
4. Register in tools/index.js
|
||||
|
||||
5. Add to .cursor/mcp.json with appropriate schema
|
||||
|
||||
6. Write tests following testing guidelines:
|
||||
- Unit test for updateSubtaskByIdDirect.js
|
||||
- Integration test for MCP tool
|
||||
|
||||
## 20. Implement generate MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for generating task files from tasks.json.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create generateTaskFilesDirect function in task-master-core.js:\n - Import generateTaskFiles from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: tasksPath, outputDir\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create generate.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import generateTaskFilesDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerGenerateTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for generateTaskFilesDirect\n - Integration test for MCP tool
|
||||
Following MCP implementation standards:
|
||||
|
||||
## 21. Implement set-status MCP command [pending]
|
||||
1. Create generateTaskFilesDirect.js in mcp-server/src/core/direct-functions/:
|
||||
- Import generateTaskFiles from task-manager.js
|
||||
- Handle file paths using findTasksJsonPath utility
|
||||
- Process arguments: tasksPath, outputDir
|
||||
- Validate inputs and handle errors with try/catch
|
||||
- Return standardized { success, data/error } object
|
||||
|
||||
2. Export from task-master-core.js:
|
||||
- Import the function from its file
|
||||
- Add to directFunctions map
|
||||
|
||||
3. Create generate.js MCP tool in mcp-server/src/tools/:
|
||||
- Import z from zod for parameter schema
|
||||
- Import executeMCPToolAction from ./utils.js
|
||||
- Import generateTaskFilesDirect from task-master-core.js
|
||||
- Define parameters matching CLI options using zod schema
|
||||
- Implement registerGenerateTool(server) with server.addTool
|
||||
- Use executeMCPToolAction in execute method
|
||||
|
||||
4. Register in tools/index.js
|
||||
|
||||
5. Add to .cursor/mcp.json with appropriate schema
|
||||
|
||||
6. Write tests following testing guidelines:
|
||||
- Unit test for generateTaskFilesDirect.js
|
||||
- Integration test for MCP tool
|
||||
|
||||
## 21. Implement set-status MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for setting task status.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create setTaskStatusDirect function in task-master-core.js:\n - Import setTaskStatus from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: taskId, status\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create set-status.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import setTaskStatusDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerSetStatusTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for setTaskStatusDirect\n - Integration test for MCP tool
|
||||
Following MCP implementation standards:
|
||||
|
||||
## 22. Implement show-task MCP command [pending]
|
||||
1. Create setTaskStatusDirect.js in mcp-server/src/core/direct-functions/:
|
||||
- Import setTaskStatus from task-manager.js
|
||||
- Handle file paths using findTasksJsonPath utility
|
||||
- Process arguments: taskId, status
|
||||
- Validate inputs and handle errors with try/catch
|
||||
- Return standardized { success, data/error } object
|
||||
|
||||
2. Export from task-master-core.js:
|
||||
- Import the function from its file
|
||||
- Add to directFunctions map
|
||||
|
||||
3. Create set-status.js MCP tool in mcp-server/src/tools/:
|
||||
- Import z from zod for parameter schema
|
||||
- Import executeMCPToolAction from ./utils.js
|
||||
- Import setTaskStatusDirect from task-master-core.js
|
||||
- Define parameters matching CLI options using zod schema
|
||||
- Implement registerSetStatusTool(server) with server.addTool
|
||||
- Use executeMCPToolAction in execute method
|
||||
|
||||
4. Register in tools/index.js
|
||||
|
||||
5. Add to .cursor/mcp.json with appropriate schema
|
||||
|
||||
6. Write tests following testing guidelines:
|
||||
- Unit test for setTaskStatusDirect.js
|
||||
- Integration test for MCP tool
|
||||
|
||||
## 22. Implement show-task MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for showing task details.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create showTaskDirect function in task-master-core.js:\n - Import showTask from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: taskId\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create show-task.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import showTaskDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerShowTaskTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for showTaskDirect\n - Integration test for MCP tool
|
||||
Following MCP implementation standards:
|
||||
|
||||
## 23. Implement next-task MCP command [pending]
|
||||
1. Create showTaskDirect.js in mcp-server/src/core/direct-functions/:
|
||||
- Import showTask from task-manager.js
|
||||
- Handle file paths using findTasksJsonPath utility
|
||||
- Process arguments: taskId
|
||||
- Validate inputs and handle errors with try/catch
|
||||
- Return standardized { success, data/error } object
|
||||
|
||||
2. Export from task-master-core.js:
|
||||
- Import the function from its file
|
||||
- Add to directFunctions map
|
||||
|
||||
3. Create show-task.js MCP tool in mcp-server/src/tools/:
|
||||
- Import z from zod for parameter schema
|
||||
- Import executeMCPToolAction from ./utils.js
|
||||
- Import showTaskDirect from task-master-core.js
|
||||
- Define parameters matching CLI options using zod schema
|
||||
- Implement registerShowTaskTool(server) with server.addTool
|
||||
- Use executeMCPToolAction in execute method
|
||||
|
||||
4. Register in tools/index.js with tool name 'show_task'
|
||||
|
||||
5. Add to .cursor/mcp.json with appropriate schema
|
||||
|
||||
6. Write tests following testing guidelines:
|
||||
- Unit test for showTaskDirect.js
|
||||
- Integration test for MCP tool
|
||||
|
||||
## 23. Implement next-task MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for finding the next task to work on.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create nextTaskDirect function in task-master-core.js:\n - Import nextTask from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments (no specific args needed except projectRoot/file)\n - Handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create next-task.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import nextTaskDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerNextTaskTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for nextTaskDirect\n - Integration test for MCP tool
|
||||
Following MCP implementation standards:
|
||||
|
||||
## 24. Implement expand-task MCP command [pending]
|
||||
1. Create nextTaskDirect.js in mcp-server/src/core/direct-functions/:
|
||||
- Import nextTask from task-manager.js
|
||||
- Handle file paths using findTasksJsonPath utility
|
||||
- Process arguments (no specific args needed except projectRoot/file)
|
||||
- Handle errors with try/catch
|
||||
- Return standardized { success, data/error } object
|
||||
|
||||
2. Export from task-master-core.js:
|
||||
- Import the function from its file
|
||||
- Add to directFunctions map
|
||||
|
||||
3. Create next-task.js MCP tool in mcp-server/src/tools/:
|
||||
- Import z from zod for parameter schema
|
||||
- Import executeMCPToolAction from ./utils.js
|
||||
- Import nextTaskDirect from task-master-core.js
|
||||
- Define parameters matching CLI options using zod schema
|
||||
- Implement registerNextTaskTool(server) with server.addTool
|
||||
- Use executeMCPToolAction in execute method
|
||||
|
||||
4. Register in tools/index.js with tool name 'next_task'
|
||||
|
||||
5. Add to .cursor/mcp.json with appropriate schema
|
||||
|
||||
6. Write tests following testing guidelines:
|
||||
- Unit test for nextTaskDirect.js
|
||||
- Integration test for MCP tool
|
||||
|
||||
## 24. Implement expand-task MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for expanding a task into subtasks.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create expandTaskDirect function in task-master-core.js:\n - Import expandTask from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: taskId, prompt, num, force, research\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create expand-task.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import expandTaskDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerExpandTaskTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for expandTaskDirect\n - Integration test for MCP tool
|
||||
Following MCP implementation standards:
|
||||
|
||||
## 25. Implement add-task MCP command [pending]
|
||||
1. Create expandTaskDirect.js in mcp-server/src/core/direct-functions/:
|
||||
- Import expandTask from task-manager.js
|
||||
- Handle file paths using findTasksJsonPath utility
|
||||
- Process arguments: taskId, prompt, num, force, research
|
||||
- Validate inputs and handle errors with try/catch
|
||||
- Return standardized { success, data/error } object
|
||||
|
||||
2. Export from task-master-core.js:
|
||||
- Import the function from its file
|
||||
- Add to directFunctions map
|
||||
|
||||
3. Create expand-task.js MCP tool in mcp-server/src/tools/:
|
||||
- Import z from zod for parameter schema
|
||||
- Import executeMCPToolAction from ./utils.js
|
||||
- Import expandTaskDirect from task-master-core.js
|
||||
- Define parameters matching CLI options using zod schema
|
||||
- Implement registerExpandTaskTool(server) with server.addTool
|
||||
- Use executeMCPToolAction in execute method
|
||||
|
||||
4. Register in tools/index.js with tool name 'expand_task'
|
||||
|
||||
5. Add to .cursor/mcp.json with appropriate schema
|
||||
|
||||
6. Write tests following testing guidelines:
|
||||
- Unit test for expandTaskDirect.js
|
||||
- Integration test for MCP tool
|
||||
|
||||
## 25. Implement add-task MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for adding new tasks.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create addTaskDirect function in task-master-core.js:\n - Import addTask from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: prompt, priority, dependencies\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create add-task.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import addTaskDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerAddTaskTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for addTaskDirect\n - Integration test for MCP tool
|
||||
Following MCP implementation standards:
|
||||
|
||||
## 26. Implement add-subtask MCP command [pending]
|
||||
1. Create addTaskDirect.js in mcp-server/src/core/direct-functions/:
|
||||
- Import addTask from task-manager.js
|
||||
- Handle file paths using findTasksJsonPath utility
|
||||
- Process arguments: prompt, priority, dependencies
|
||||
- Validate inputs and handle errors with try/catch
|
||||
- Return standardized { success, data/error } object
|
||||
|
||||
2. Export from task-master-core.js:
|
||||
- Import the function from its file
|
||||
- Add to directFunctions map
|
||||
|
||||
3. Create add-task.js MCP tool in mcp-server/src/tools/:
|
||||
- Import z from zod for parameter schema
|
||||
- Import executeMCPToolAction from ./utils.js
|
||||
- Import addTaskDirect from task-master-core.js
|
||||
- Define parameters matching CLI options using zod schema
|
||||
- Implement registerAddTaskTool(server) with server.addTool
|
||||
- Use executeMCPToolAction in execute method
|
||||
|
||||
4. Register in tools/index.js with tool name 'add_task'
|
||||
|
||||
5. Add to .cursor/mcp.json with appropriate schema
|
||||
|
||||
6. Write tests following testing guidelines:
|
||||
- Unit test for addTaskDirect.js
|
||||
- Integration test for MCP tool
|
||||
|
||||
## 26. Implement add-subtask MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for adding subtasks to existing tasks.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create addSubtaskDirect function in task-master-core.js:\n - Import addSubtask from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: parentTaskId, title, description, details\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create add-subtask.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import addSubtaskDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerAddSubtaskTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for addSubtaskDirect\n - Integration test for MCP tool
|
||||
Following MCP implementation standards:
|
||||
|
||||
## 27. Implement remove-subtask MCP command [pending]
|
||||
1. Create addSubtaskDirect.js in mcp-server/src/core/direct-functions/:
|
||||
- Import addSubtask from task-manager.js
|
||||
- Handle file paths using findTasksJsonPath utility
|
||||
- Process arguments: parentTaskId, title, description, details
|
||||
- Validate inputs and handle errors with try/catch
|
||||
- Return standardized { success, data/error } object
|
||||
|
||||
2. Export from task-master-core.js:
|
||||
- Import the function from its file
|
||||
- Add to directFunctions map
|
||||
|
||||
3. Create add-subtask.js MCP tool in mcp-server/src/tools/:
|
||||
- Import z from zod for parameter schema
|
||||
- Import executeMCPToolAction from ./utils.js
|
||||
- Import addSubtaskDirect from task-master-core.js
|
||||
- Define parameters matching CLI options using zod schema
|
||||
- Implement registerAddSubtaskTool(server) with server.addTool
|
||||
- Use executeMCPToolAction in execute method
|
||||
|
||||
4. Register in tools/index.js with tool name 'add_subtask'
|
||||
|
||||
5. Add to .cursor/mcp.json with appropriate schema
|
||||
|
||||
6. Write tests following testing guidelines:
|
||||
- Unit test for addSubtaskDirect.js
|
||||
- Integration test for MCP tool
|
||||
|
||||
## 27. Implement remove-subtask MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for removing subtasks from tasks.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create removeSubtaskDirect function in task-master-core.js:\n - Import removeSubtask from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: parentTaskId, subtaskId\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create remove-subtask.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import removeSubtaskDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerRemoveSubtaskTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for removeSubtaskDirect\n - Integration test for MCP tool
|
||||
Following MCP implementation standards:
|
||||
|
||||
## 28. Implement analyze MCP command [pending]
|
||||
1. Create removeSubtaskDirect.js in mcp-server/src/core/direct-functions/:
|
||||
- Import removeSubtask from task-manager.js
|
||||
- Handle file paths using findTasksJsonPath utility
|
||||
- Process arguments: parentTaskId, subtaskId
|
||||
- Validate inputs and handle errors with try/catch
|
||||
- Return standardized { success, data/error } object
|
||||
|
||||
2. Export from task-master-core.js:
|
||||
- Import the function from its file
|
||||
- Add to directFunctions map
|
||||
|
||||
3. Create remove-subtask.js MCP tool in mcp-server/src/tools/:
|
||||
- Import z from zod for parameter schema
|
||||
- Import executeMCPToolAction from ./utils.js
|
||||
- Import removeSubtaskDirect from task-master-core.js
|
||||
- Define parameters matching CLI options using zod schema
|
||||
- Implement registerRemoveSubtaskTool(server) with server.addTool
|
||||
- Use executeMCPToolAction in execute method
|
||||
|
||||
4. Register in tools/index.js with tool name 'remove_subtask'
|
||||
|
||||
5. Add to .cursor/mcp.json with appropriate schema
|
||||
|
||||
6. Write tests following testing guidelines:
|
||||
- Unit test for removeSubtaskDirect.js
|
||||
- Integration test for MCP tool
|
||||
|
||||
## 28. Implement analyze MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for analyzing task complexity.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create analyzeTaskComplexityDirect function in task-master-core.js:\n - Import analyzeTaskComplexity from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: taskId\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create analyze.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import analyzeTaskComplexityDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerAnalyzeTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for analyzeTaskComplexityDirect\n - Integration test for MCP tool
|
||||
Following MCP implementation standards:
|
||||
|
||||
## 29. Implement clear-subtasks MCP command [pending]
|
||||
1. Create analyzeTaskComplexityDirect.js in mcp-server/src/core/direct-functions/:
|
||||
- Import analyzeTaskComplexity from task-manager.js
|
||||
- Handle file paths using findTasksJsonPath utility
|
||||
- Process arguments: taskId
|
||||
- Validate inputs and handle errors with try/catch
|
||||
- Return standardized { success, data/error } object
|
||||
|
||||
2. Export from task-master-core.js:
|
||||
- Import the function from its file
|
||||
- Add to directFunctions map
|
||||
|
||||
3. Create analyze.js MCP tool in mcp-server/src/tools/:
|
||||
- Import z from zod for parameter schema
|
||||
- Import executeMCPToolAction from ./utils.js
|
||||
- Import analyzeTaskComplexityDirect from task-master-core.js
|
||||
- Define parameters matching CLI options using zod schema
|
||||
- Implement registerAnalyzeTool(server) with server.addTool
|
||||
- Use executeMCPToolAction in execute method
|
||||
|
||||
4. Register in tools/index.js with tool name 'analyze'
|
||||
|
||||
5. Add to .cursor/mcp.json with appropriate schema
|
||||
|
||||
6. Write tests following testing guidelines:
|
||||
- Unit test for analyzeTaskComplexityDirect.js
|
||||
- Integration test for MCP tool
|
||||
|
||||
## 29. Implement clear-subtasks MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for clearing subtasks from a parent task.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create clearSubtasksDirect function in task-master-core.js:\n - Import clearSubtasks from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: taskId\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create clear-subtasks.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import clearSubtasksDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerClearSubtasksTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for clearSubtasksDirect\n - Integration test for MCP tool
|
||||
Following MCP implementation standards:
|
||||
|
||||
## 30. Implement expand-all MCP command [pending]
|
||||
1. Create clearSubtasksDirect.js in mcp-server/src/core/direct-functions/:
|
||||
- Import clearSubtasks from task-manager.js
|
||||
- Handle file paths using findTasksJsonPath utility
|
||||
- Process arguments: taskId
|
||||
- Validate inputs and handle errors with try/catch
|
||||
- Return standardized { success, data/error } object
|
||||
|
||||
2. Export from task-master-core.js:
|
||||
- Import the function from its file
|
||||
- Add to directFunctions map
|
||||
|
||||
3. Create clear-subtasks.js MCP tool in mcp-server/src/tools/:
|
||||
- Import z from zod for parameter schema
|
||||
- Import executeMCPToolAction from ./utils.js
|
||||
- Import clearSubtasksDirect from task-master-core.js
|
||||
- Define parameters matching CLI options using zod schema
|
||||
- Implement registerClearSubtasksTool(server) with server.addTool
|
||||
- Use executeMCPToolAction in execute method
|
||||
|
||||
4. Register in tools/index.js with tool name 'clear_subtasks'
|
||||
|
||||
5. Add to .cursor/mcp.json with appropriate schema
|
||||
|
||||
6. Write tests following testing guidelines:
|
||||
- Unit test for clearSubtasksDirect.js
|
||||
- Integration test for MCP tool
|
||||
|
||||
## 30. Implement expand-all MCP command [done]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for expanding all tasks into subtasks.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create expandAllTasksDirect function in task-master-core.js:\n - Import expandAllTasks from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: prompt, num, force, research\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create expand-all.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import expandAllTasksDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerExpandAllTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for expandAllTasksDirect\n - Integration test for MCP tool
|
||||
Following MCP implementation standards:
|
||||
|
||||
1. Create expandAllTasksDirect.js in mcp-server/src/core/direct-functions/:
|
||||
- Import expandAllTasks from task-manager.js
|
||||
- Handle file paths using findTasksJsonPath utility
|
||||
- Process arguments: prompt, num, force, research
|
||||
- Validate inputs and handle errors with try/catch
|
||||
- Return standardized { success, data/error } object
|
||||
|
||||
2. Export from task-master-core.js:
|
||||
- Import the function from its file
|
||||
- Add to directFunctions map
|
||||
|
||||
3. Create expand-all.js MCP tool in mcp-server/src/tools/:
|
||||
- Import z from zod for parameter schema
|
||||
- Import executeMCPToolAction from ./utils.js
|
||||
- Import expandAllTasksDirect from task-master-core.js
|
||||
- Define parameters matching CLI options using zod schema
|
||||
- Implement registerExpandAllTool(server) with server.addTool
|
||||
- Use executeMCPToolAction in execute method
|
||||
|
||||
4. Register in tools/index.js with tool name 'expand_all'
|
||||
|
||||
5. Add to .cursor/mcp.json with appropriate schema
|
||||
|
||||
6. Write tests following testing guidelines:
|
||||
- Unit test for expandAllTasksDirect.js
|
||||
- Integration test for MCP tool
|
||||
|
||||
## 31. Create Core Direct Function Structure [done]
|
||||
### Dependencies: None
|
||||
### Description: Set up the modular directory structure for direct functions and update task-master-core.js to act as an import/export hub.
|
||||
### Details:
|
||||
1. Create the mcp-server/src/core/direct-functions/ directory structure
|
||||
2. Update task-master-core.js to import and re-export functions from individual files
|
||||
3. Create a utils directory for shared utility functions
|
||||
4. Implement a standard template for direct function files
|
||||
5. Create documentation for the new modular structure
|
||||
6. Update existing imports in MCP tools to use the new structure
|
||||
7. Create unit tests for the import/export hub functionality
|
||||
8. Ensure backward compatibility with any existing code using the old structure
|
||||
|
||||
## 32. Refactor Existing Direct Functions to Modular Structure [done]
|
||||
### Dependencies: 23.31
|
||||
### Description: Move existing direct function implementations from task-master-core.js to individual files in the new directory structure.
|
||||
### Details:
|
||||
1. Identify all existing direct functions in task-master-core.js
|
||||
2. Create individual files for each function in mcp-server/src/core/direct-functions/
|
||||
3. Move the implementation to the new files, ensuring consistent error handling
|
||||
4. Update imports/exports in task-master-core.js
|
||||
5. Create unit tests for each individual function file
|
||||
6. Update documentation to reflect the new structure
|
||||
7. Ensure all MCP tools reference the functions through task-master-core.js
|
||||
8. Verify backward compatibility with existing code
|
||||
|
||||
## 33. Implement Naming Convention Standards [done]
|
||||
### Dependencies: None
|
||||
### Description: Update all MCP server components to follow the standardized naming conventions for files, functions, and tools.
|
||||
### Details:
|
||||
1. Audit all existing MCP server files and update file names to use kebab-case (like-this.js)
|
||||
2. Refactor direct function names to use camelCase with Direct suffix (functionNameDirect)
|
||||
3. Update tool registration functions to use camelCase with Tool suffix (registerToolNameTool)
|
||||
4. Ensure all MCP tool names exposed to clients use snake_case (tool_name)
|
||||
5. Create a naming convention documentation file for future reference
|
||||
6. Update imports/exports in all files to reflect the new naming conventions
|
||||
7. Verify that all tools are properly registered with the correct naming pattern
|
||||
8. Update tests to reflect the new naming conventions
|
||||
9. Create a linting rule to enforce naming conventions in future development
|
||||
|
||||
## 34. Review functionality of all MCP direct functions [in-progress]
|
||||
### Dependencies: None
|
||||
### Description: Verify that all implemented MCP direct functions work correctly with edge cases
|
||||
### Details:
|
||||
Perform comprehensive testing of all MCP direct function implementations to ensure they handle various input scenarios correctly and return appropriate responses. Check edge cases, error handling, and parameter validation.
|
||||
|
||||
## 35. Review commands.js to ensure all commands are available via MCP [done]
|
||||
### Dependencies: None
|
||||
### Description: Verify that all CLI commands have corresponding MCP implementations
|
||||
### Details:
|
||||
Compare the commands defined in scripts/modules/commands.js with the MCP tools implemented in mcp-server/src/tools/. Create a list of any commands missing MCP implementations and ensure all command options are properly represented in the MCP parameter schemas.
|
||||
|
||||
## 36. Finish setting up addResearch in index.js [done]
|
||||
### Dependencies: None
|
||||
### Description: Complete the implementation of addResearch functionality in the MCP server
|
||||
### Details:
|
||||
Implement the addResearch function in the MCP server's index.js file to enable research-backed functionality. This should include proper integration with Perplexity AI and ensure that all MCP tools requiring research capabilities have access to this functionality.
|
||||
|
||||
## 37. Finish setting up addTemplates in index.js [done]
|
||||
### Dependencies: None
|
||||
### Description: Complete the implementation of addTemplates functionality in the MCP server
|
||||
### Details:
|
||||
Implement the addTemplates function in the MCP server's index.js file to enable template-based generation. Configure proper loading of templates from the appropriate directory and ensure they're accessible to all MCP tools that need to generate formatted content.
|
||||
|
||||
## 38. Implement robust project root handling for file paths [done]
|
||||
### Dependencies: None
|
||||
### Description: Create a consistent approach for handling project root paths across MCP tools
|
||||
### Details:
|
||||
Analyze and refactor the project root handling mechanism to ensure consistent file path resolution across all MCP direct functions. This should properly handle relative and absolute paths, respect the projectRoot parameter when provided, and have appropriate fallbacks when not specified. Document the approach in a comment within path-utils.js for future maintainers.
|
||||
|
||||
<info added on 2025-04-01T02:21:57.137Z>
|
||||
Here's additional information addressing the request for research on npm package path handling:
|
||||
|
||||
## Path Handling Best Practices for npm Packages
|
||||
|
||||
### Distinguishing Package and Project Paths
|
||||
|
||||
1. **Package Installation Path**:
|
||||
- Use `require.resolve()` to find paths relative to your package
|
||||
- For global installs, use `process.execPath` to locate the Node.js executable
|
||||
|
||||
2. **Project Path**:
|
||||
- Use `process.cwd()` as a starting point
|
||||
- Search upwards for `package.json` or `.git` to find project root
|
||||
- Consider using packages like `find-up` or `pkg-dir` for robust root detection
|
||||
|
||||
### Standard Approaches
|
||||
|
||||
1. **Detecting Project Root**:
|
||||
- Recursive search for `package.json` or `.git` directory
|
||||
- Use `path.resolve()` to handle relative paths
|
||||
- Fall back to `process.cwd()` if no root markers found
|
||||
|
||||
2. **Accessing Package Files**:
|
||||
- Use `__dirname` for paths relative to current script
|
||||
- For files in `node_modules`, use `require.resolve('package-name/path/to/file')`
|
||||
|
||||
3. **Separating Package and Project Files**:
|
||||
- Store package-specific files in a dedicated directory (e.g., `.task-master`)
|
||||
- Use environment variables to override default paths
|
||||
|
||||
### Cross-Platform Compatibility
|
||||
|
||||
1. Use `path.join()` and `path.resolve()` for cross-platform path handling
|
||||
2. Avoid hardcoded forward/backslashes in paths
|
||||
3. Use `os.homedir()` for user home directory references
|
||||
|
||||
### Best Practices for Path Resolution
|
||||
|
||||
1. **Absolute vs Relative Paths**:
|
||||
- Always convert relative paths to absolute using `path.resolve()`
|
||||
- Use `path.isAbsolute()` to check if a path is already absolute
|
||||
|
||||
2. **Handling Different Installation Scenarios**:
|
||||
- Local dev: Use `process.cwd()` as fallback project root
|
||||
- Local dependency: Resolve paths relative to consuming project
|
||||
- Global install: Use `process.execPath` to locate global `node_modules`
|
||||
|
||||
3. **Configuration Options**:
|
||||
- Allow users to specify custom project root via CLI option or config file
|
||||
- Implement a clear precedence order for path resolution (e.g., CLI option > config file > auto-detection)
|
||||
|
||||
4. **Error Handling**:
|
||||
- Provide clear error messages when critical paths cannot be resolved
|
||||
- Implement retry logic with alternative methods if primary path detection fails
|
||||
|
||||
5. **Documentation**:
|
||||
- Clearly document path handling behavior in README and inline comments
|
||||
- Provide examples for common scenarios and edge cases
|
||||
|
||||
By implementing these practices, the MCP tools can achieve consistent and robust path handling across various npm installation and usage scenarios.
|
||||
</info added on 2025-04-01T02:21:57.137Z>
|
||||
|
||||
<info added on 2025-04-01T02:25:01.463Z>
|
||||
Here's additional information addressing the request for clarification on path handling challenges for npm packages:
|
||||
|
||||
## Advanced Path Handling Challenges and Solutions
|
||||
|
||||
### Challenges to Avoid
|
||||
|
||||
1. **Relying solely on process.cwd()**:
|
||||
- Global installs: process.cwd() could be any directory
|
||||
- Local installs as dependency: points to parent project's root
|
||||
- Users may run commands from subdirectories
|
||||
|
||||
2. **Dual Path Requirements**:
|
||||
- Package Path: Where task-master code is installed
|
||||
- Project Path: Where user's tasks.json resides
|
||||
|
||||
3. **Specific Edge Cases**:
|
||||
- Non-project directory execution
|
||||
- Deeply nested project structures
|
||||
- Yarn/pnpm workspaces
|
||||
- Monorepos with multiple tasks.json files
|
||||
- Commands invoked from scripts in different directories
|
||||
|
||||
### Advanced Solutions
|
||||
|
||||
1. **Project Marker Detection**:
|
||||
- Implement recursive search for package.json or .git
|
||||
- Use `find-up` package for efficient directory traversal
|
||||
```javascript
|
||||
const findUp = require('find-up');
|
||||
const projectRoot = await findUp(dir => findUp.sync('package.json', { cwd: dir }));
|
||||
```
|
||||
|
||||
2. **Package Path Resolution**:
|
||||
- Leverage `import.meta.url` with `fileURLToPath`:
|
||||
```javascript
|
||||
import { fileURLToPath } from 'url';
|
||||
import path from 'path';
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
const packageRoot = path.resolve(__dirname, '..');
|
||||
```
|
||||
|
||||
3. **Workspace-Aware Resolution**:
|
||||
- Detect Yarn/pnpm workspaces:
|
||||
```javascript
|
||||
const findWorkspaceRoot = require('find-yarn-workspace-root');
|
||||
const workspaceRoot = findWorkspaceRoot(process.cwd());
|
||||
```
|
||||
|
||||
4. **Monorepo Handling**:
|
||||
- Implement cascading configuration search
|
||||
- Allow multiple tasks.json files with clear precedence rules
|
||||
|
||||
5. **CLI Tool Inspiration**:
|
||||
- ESLint: Uses `eslint-find-rule-files` for config discovery
|
||||
- Jest: Implements `jest-resolve` for custom module resolution
|
||||
- Next.js: Uses `find-up` to locate project directories
|
||||
|
||||
6. **Robust Path Resolution Algorithm**:
|
||||
```javascript
|
||||
function resolveProjectRoot(startDir) {
|
||||
const projectMarkers = ['package.json', '.git', 'tasks.json'];
|
||||
let currentDir = startDir;
|
||||
while (currentDir !== path.parse(currentDir).root) {
|
||||
if (projectMarkers.some(marker => fs.existsSync(path.join(currentDir, marker)))) {
|
||||
return currentDir;
|
||||
}
|
||||
currentDir = path.dirname(currentDir);
|
||||
}
|
||||
return startDir; // Fallback to original directory
|
||||
}
|
||||
```
|
||||
|
||||
7. **Environment Variable Overrides**:
|
||||
- Allow users to explicitly set paths:
|
||||
```javascript
|
||||
const projectRoot = process.env.TASK_MASTER_PROJECT_ROOT || resolveProjectRoot(process.cwd());
|
||||
```
|
||||
|
||||
By implementing these advanced techniques, task-master can achieve robust path handling across various npm scenarios without requiring manual specification.
|
||||
</info added on 2025-04-01T02:25:01.463Z>
|
||||
|
||||
## 39. Implement add-dependency MCP command [done]
|
||||
### Dependencies: 23.31
|
||||
### Description: Create MCP tool implementation for the add-dependency command
|
||||
### Details:
|
||||
|
||||
|
||||
## 40. Implement remove-dependency MCP command [done]
|
||||
### Dependencies: 23.31
|
||||
### Description: Create MCP tool implementation for the remove-dependency command
|
||||
### Details:
|
||||
|
||||
|
||||
## 41. Implement validate-dependencies MCP command [done]
|
||||
### Dependencies: 23.31, 23.39, 23.40
|
||||
### Description: Create MCP tool implementation for the validate-dependencies command
|
||||
### Details:
|
||||
|
||||
|
||||
## 42. Implement fix-dependencies MCP command [done]
|
||||
### Dependencies: 23.31, 23.41
|
||||
### Description: Create MCP tool implementation for the fix-dependencies command
|
||||
### Details:
|
||||
|
||||
|
||||
## 43. Implement complexity-report MCP command [done]
|
||||
### Dependencies: 23.31
|
||||
### Description: Create MCP tool implementation for the complexity-report command
|
||||
### Details:
|
||||
|
||||
|
||||
## 44. Implement init MCP command [deferred]
|
||||
### Dependencies: None
|
||||
### Description: Create MCP tool implementation for the init command
|
||||
### Details:
|
||||
|
||||
|
||||
## 45. Support setting env variables through mcp server [pending]
|
||||
### Dependencies: None
|
||||
### Description: currently we need to access the env variables through the env file present in the project (that we either create or find and append to). we could abstract this by allowing users to define the env vars in the mcp.json directly as folks currently do. mcp.json should then be in gitignore if thats the case. but for this i think in fastmcp all we need is to access ENV in a specific way. we need to find that way and then implement it
|
||||
### Details:
|
||||
|
||||
|
||||
<info added on 2025-04-01T01:57:24.160Z>
|
||||
To access environment variables defined in the mcp.json config file when using FastMCP, you can utilize the `Config` class from the `fastmcp` module. Here's how to implement this:
|
||||
|
||||
1. Import the necessary module:
|
||||
```python
|
||||
from fastmcp import Config
|
||||
```
|
||||
|
||||
2. Access environment variables:
|
||||
```python
|
||||
config = Config()
|
||||
env_var = config.env.get("VARIABLE_NAME")
|
||||
```
|
||||
|
||||
This approach allows you to retrieve environment variables defined in the mcp.json file directly in your code. The `Config` class automatically loads the configuration, including environment variables, from the mcp.json file.
|
||||
|
||||
For security, ensure that sensitive information in mcp.json is not committed to version control. You can add mcp.json to your .gitignore file to prevent accidental commits.
|
||||
|
||||
If you need to access multiple environment variables, you can do so like this:
|
||||
```python
|
||||
db_url = config.env.get("DATABASE_URL")
|
||||
api_key = config.env.get("API_KEY")
|
||||
debug_mode = config.env.get("DEBUG_MODE", False) # With a default value
|
||||
```
|
||||
|
||||
This method provides a clean and consistent way to access environment variables defined in the mcp.json configuration file within your FastMCP project.
|
||||
</info added on 2025-04-01T01:57:24.160Z>
|
||||
|
||||
<info added on 2025-04-01T01:57:49.848Z>
|
||||
To access environment variables defined in the mcp.json config file when using FastMCP in a JavaScript environment, you can use the `fastmcp` npm package. Here's how to implement this:
|
||||
|
||||
1. Install the `fastmcp` package:
|
||||
```bash
|
||||
npm install fastmcp
|
||||
```
|
||||
|
||||
2. Import the necessary module:
|
||||
```javascript
|
||||
const { Config } = require('fastmcp');
|
||||
```
|
||||
|
||||
3. Access environment variables:
|
||||
```javascript
|
||||
const config = new Config();
|
||||
const envVar = config.env.get('VARIABLE_NAME');
|
||||
```
|
||||
|
||||
This approach allows you to retrieve environment variables defined in the mcp.json file directly in your JavaScript code. The `Config` class automatically loads the configuration, including environment variables, from the mcp.json file.
|
||||
|
||||
You can access multiple environment variables like this:
|
||||
```javascript
|
||||
const dbUrl = config.env.get('DATABASE_URL');
|
||||
const apiKey = config.env.get('API_KEY');
|
||||
const debugMode = config.env.get('DEBUG_MODE', false); // With a default value
|
||||
```
|
||||
|
||||
This method provides a consistent way to access environment variables defined in the mcp.json configuration file within your FastMCP project in a JavaScript environment.
|
||||
</info added on 2025-04-01T01:57:49.848Z>
|
||||
|
||||
## 46. adjust rules so it prioritizes mcp commands over script [done]
|
||||
### Dependencies: None
|
||||
### Description:
|
||||
### Details:
|
||||
|
||||
|
||||
|
||||
39
tasks/task_040.txt
Normal file
39
tasks/task_040.txt
Normal file
@@ -0,0 +1,39 @@
|
||||
# Task ID: 40
|
||||
# Title: Implement 'plan' Command for Task Implementation Planning
|
||||
# Status: pending
|
||||
# Dependencies: None
|
||||
# Priority: medium
|
||||
# Description: Create a new 'plan' command that appends a structured implementation plan to tasks or subtasks, generating step-by-step instructions for execution based on the task content.
|
||||
# Details:
|
||||
Implement a new 'plan' command that will append a structured implementation plan to existing tasks or subtasks. The implementation should:
|
||||
|
||||
1. Accept an '--id' parameter that can reference either a task or subtask ID
|
||||
2. Determine whether the ID refers to a task or subtask and retrieve the appropriate content from tasks.json and/or individual task files
|
||||
3. Generate a step-by-step implementation plan using AI (Claude by default)
|
||||
4. Support a '--research' flag to use Perplexity instead of Claude when needed
|
||||
5. Format the generated plan within XML tags like `<implementation_plan as of timestamp>...</implementation_plan>`
|
||||
6. Append this plan to the implementation details section of the task/subtask
|
||||
7. Display a confirmation card indicating the implementation plan was successfully created
|
||||
|
||||
The implementation plan should be detailed and actionable, containing specific steps such as searching for files, creating new files, modifying existing files, etc. The goal is to frontload planning work into the task/subtask so execution can begin immediately.
|
||||
|
||||
Reference the existing 'update-subtask' command implementation as a starting point, as it uses a similar approach for appending content to tasks. Ensure proper error handling for cases where the specified ID doesn't exist or when API calls fail.
|
||||
|
||||
# Test Strategy:
|
||||
Testing should verify:
|
||||
|
||||
1. Command correctly identifies and retrieves content for both task and subtask IDs
|
||||
2. Implementation plans are properly generated and formatted with XML tags and timestamps
|
||||
3. Plans are correctly appended to the implementation details section without overwriting existing content
|
||||
4. The '--research' flag successfully switches the backend from Claude to Perplexity
|
||||
5. Appropriate error messages are displayed for invalid IDs or API failures
|
||||
6. Confirmation card is displayed after successful plan creation
|
||||
|
||||
Test cases should include:
|
||||
- Running 'plan --id 123' on an existing task
|
||||
- Running 'plan --id 123.1' on an existing subtask
|
||||
- Running 'plan --id 123 --research' to test the Perplexity integration
|
||||
- Running 'plan --id 999' with a non-existent ID to verify error handling
|
||||
- Running the command on tasks with existing implementation plans to ensure proper appending
|
||||
|
||||
Manually review the quality of generated plans to ensure they provide actionable, step-by-step guidance that accurately reflects the task requirements.
|
||||
72
tasks/task_041.txt
Normal file
72
tasks/task_041.txt
Normal file
@@ -0,0 +1,72 @@
|
||||
# Task ID: 41
|
||||
# Title: Implement Visual Task Dependency Graph in Terminal
|
||||
# Status: pending
|
||||
# Dependencies: None
|
||||
# Priority: medium
|
||||
# Description: Create a feature that renders task dependencies as a visual graph using ASCII/Unicode characters in the terminal, with color-coded nodes representing tasks and connecting lines showing dependency relationships.
|
||||
# Details:
|
||||
This implementation should include:
|
||||
|
||||
1. Create a new command `graph` or `visualize` that displays the dependency graph.
|
||||
|
||||
2. Design an ASCII/Unicode-based graph rendering system that:
|
||||
- Represents each task as a node with its ID and abbreviated title
|
||||
- Shows dependencies as directional lines between nodes (→, ↑, ↓, etc.)
|
||||
- Uses color coding for different task statuses (e.g., green for completed, yellow for in-progress, red for blocked)
|
||||
- Handles complex dependency chains with proper spacing and alignment
|
||||
|
||||
3. Implement layout algorithms to:
|
||||
- Minimize crossing lines for better readability
|
||||
- Properly space nodes to avoid overlapping
|
||||
- Support both vertical and horizontal graph orientations (as a configurable option)
|
||||
|
||||
4. Add detection and highlighting of circular dependencies with a distinct color/pattern
|
||||
|
||||
5. Include a legend explaining the color coding and symbols used
|
||||
|
||||
6. Ensure the graph is responsive to terminal width, with options to:
|
||||
- Automatically scale to fit the current terminal size
|
||||
- Allow zooming in/out of specific sections for large graphs
|
||||
- Support pagination or scrolling for very large dependency networks
|
||||
|
||||
7. Add options to filter the graph by:
|
||||
- Specific task IDs or ranges
|
||||
- Task status
|
||||
- Dependency depth (e.g., show only direct dependencies or N levels deep)
|
||||
|
||||
8. Ensure accessibility by using distinct patterns in addition to colors for users with color vision deficiencies
|
||||
|
||||
9. Optimize performance for projects with many tasks and complex dependency relationships
|
||||
|
||||
# Test Strategy:
|
||||
1. Unit Tests:
|
||||
- Test the graph generation algorithm with various dependency structures
|
||||
- Verify correct node placement and connection rendering
|
||||
- Test circular dependency detection
|
||||
- Verify color coding matches task statuses
|
||||
|
||||
2. Integration Tests:
|
||||
- Test the command with projects of varying sizes (small, medium, large)
|
||||
- Verify correct handling of different terminal sizes
|
||||
- Test all filtering options
|
||||
|
||||
3. Visual Verification:
|
||||
- Create test cases with predefined dependency structures and verify the visual output matches expected patterns
|
||||
- Test with terminals of different sizes, including very narrow terminals
|
||||
- Verify readability of complex graphs
|
||||
|
||||
4. Edge Cases:
|
||||
- Test with no dependencies (single nodes only)
|
||||
- Test with circular dependencies
|
||||
- Test with very deep dependency chains
|
||||
- Test with wide dependency networks (many parallel tasks)
|
||||
- Test with the maximum supported number of tasks
|
||||
|
||||
5. Usability Testing:
|
||||
- Have team members use the feature and provide feedback on readability and usefulness
|
||||
- Test in different terminal emulators to ensure compatibility
|
||||
- Verify the feature works in terminals with limited color support
|
||||
|
||||
6. Performance Testing:
|
||||
- Measure rendering time for large projects
|
||||
- Ensure reasonable performance with 100+ interconnected tasks
|
||||
91
tasks/task_042.txt
Normal file
91
tasks/task_042.txt
Normal file
@@ -0,0 +1,91 @@
|
||||
# Task ID: 42
|
||||
# Title: Implement MCP-to-MCP Communication Protocol
|
||||
# Status: pending
|
||||
# Dependencies: None
|
||||
# Priority: medium
|
||||
# Description: Design and implement a communication protocol that allows Taskmaster to interact with external MCP (Model Context Protocol) tools and servers, enabling programmatic operations across these tools without requiring custom integration code. The system should dynamically connect to MCP servers chosen by the user for task storage and management (e.g., GitHub-MCP or Postgres-MCP). This eliminates the need for separate APIs or SDKs for each service. The goal is to create a standardized, agnostic system that facilitates seamless task execution and interaction with external systems. Additionally, the system should support two operational modes: **solo/local mode**, where tasks are managed locally using a `tasks.json` file, and **multiplayer/remote mode**, where tasks are managed via external MCP integrations. The core modules of Taskmaster should dynamically adapt their operations based on the selected mode, with multiplayer/remote mode leveraging MCP servers for all task management operations.
|
||||
# Details:
|
||||
This task involves creating a standardized way for Taskmaster to communicate with external MCP implementations and tools. The implementation should:
|
||||
|
||||
1. Define a standard protocol for communication with MCP servers, including authentication, request/response formats, and error handling.
|
||||
2. Leverage the existing `fastmcp` server logic to enable interaction with external MCP tools programmatically, focusing on creating a modular and reusable system.
|
||||
3. Implement an adapter pattern that allows Taskmaster to connect to any MCP-compliant tool or server.
|
||||
4. Build a client module capable of discovering, connecting to, and exchanging data with external MCP tools, ensuring compatibility with various implementations.
|
||||
5. Provide a reference implementation for interacting with a specific MCP tool (e.g., GitHub-MCP or Postgres-MCP) to demonstrate the protocol's functionality.
|
||||
6. Ensure the protocol supports versioning to maintain compatibility as MCP tools evolve.
|
||||
7. Implement rate limiting and backoff strategies to prevent overwhelming external MCP tools.
|
||||
8. Create a configuration system that allows users to specify connection details for external MCP tools and servers.
|
||||
9. Add support for two operational modes:
|
||||
- **Solo/Local Mode**: Tasks are managed locally using a `tasks.json` file.
|
||||
- **Multiplayer/Remote Mode**: Tasks are managed via external MCP integrations (e.g., GitHub-MCP or Postgres-MCP). The system should dynamically switch between these modes based on user configuration.
|
||||
10. Update core modules to perform task operations on the appropriate system (local or remote) based on the selected mode, with remote mode relying entirely on MCP servers for task management.
|
||||
11. Document the protocol thoroughly to enable other developers to implement it in their MCP tools.
|
||||
|
||||
The implementation should prioritize asynchronous communication where appropriate and handle network failures gracefully. Security considerations, including encryption and robust authentication mechanisms, should be integral to the design.
|
||||
|
||||
# Test Strategy:
|
||||
Testing should verify both the protocol design and implementation:
|
||||
|
||||
1. Unit tests for the adapter pattern, ensuring it correctly translates between Taskmaster's internal models and the MCP protocol.
|
||||
2. Integration tests with a mock MCP tool or server to validate the full request/response cycle.
|
||||
3. Specific tests for the reference implementation (e.g., GitHub-MCP or Postgres-MCP), including authentication flows.
|
||||
4. Error handling tests that simulate network failures, timeouts, and malformed responses.
|
||||
5. Performance tests to ensure the communication does not introduce significant latency.
|
||||
6. Security tests to verify that authentication and encryption mechanisms are functioning correctly.
|
||||
7. End-to-end tests demonstrating Taskmaster's ability to programmatically interact with external MCP tools and execute tasks.
|
||||
8. Compatibility tests with different versions of the protocol to ensure backward compatibility.
|
||||
9. Tests for mode switching:
|
||||
- Validate that Taskmaster correctly operates in solo/local mode using the `tasks.json` file.
|
||||
- Validate that Taskmaster correctly operates in multiplayer/remote mode with external MCP integrations (e.g., GitHub-MCP or Postgres-MCP).
|
||||
- Ensure seamless switching between modes without data loss or corruption.
|
||||
10. A test harness should be created to simulate an MCP tool or server for testing purposes without relying on external dependencies. Test cases should be documented thoroughly to serve as examples for other implementations.
|
||||
|
||||
# Subtasks:
|
||||
## 42-1. Define MCP-to-MCP communication protocol [pending]
|
||||
### Dependencies: None
|
||||
### Description:
|
||||
### Details:
|
||||
|
||||
|
||||
## 42-2. Implement adapter pattern for MCP integration [pending]
|
||||
### Dependencies: None
|
||||
### Description:
|
||||
### Details:
|
||||
|
||||
|
||||
## 42-3. Develop client module for MCP tool discovery and interaction [pending]
|
||||
### Dependencies: None
|
||||
### Description:
|
||||
### Details:
|
||||
|
||||
|
||||
## 42-4. Provide reference implementation for GitHub-MCP integration [pending]
|
||||
### Dependencies: None
|
||||
### Description:
|
||||
### Details:
|
||||
|
||||
|
||||
## 42-5. Add support for solo/local and multiplayer/remote modes [pending]
|
||||
### Dependencies: None
|
||||
### Description:
|
||||
### Details:
|
||||
|
||||
|
||||
## 42-6. Update core modules to support dynamic mode-based operations [pending]
|
||||
### Dependencies: None
|
||||
### Description:
|
||||
### Details:
|
||||
|
||||
|
||||
## 42-7. Document protocol and mode-switching functionality [pending]
|
||||
### Dependencies: None
|
||||
### Description:
|
||||
### Details:
|
||||
|
||||
|
||||
## 42-8. Update terminology to reflect MCP server-based communication [pending]
|
||||
### Dependencies: None
|
||||
### Description:
|
||||
### Details:
|
||||
|
||||
|
||||
46
tasks/task_043.txt
Normal file
46
tasks/task_043.txt
Normal file
@@ -0,0 +1,46 @@
|
||||
# Task ID: 43
|
||||
# Title: Add Research Flag to Add-Task Command
|
||||
# Status: pending
|
||||
# Dependencies: None
|
||||
# Priority: medium
|
||||
# Description: Implement a '--research' flag for the add-task command that enables users to automatically generate research-related subtasks when creating a new task.
|
||||
# Details:
|
||||
Modify the add-task command to accept a new optional flag '--research'. When this flag is provided, the system should automatically generate and attach a set of research-oriented subtasks to the newly created task. These subtasks should follow a standard research methodology structure:
|
||||
|
||||
1. Background Investigation: Research existing solutions and approaches
|
||||
2. Requirements Analysis: Define specific requirements and constraints
|
||||
3. Technology/Tool Evaluation: Compare potential technologies or tools for implementation
|
||||
4. Proof of Concept: Create a minimal implementation to validate approach
|
||||
5. Documentation: Document findings and recommendations
|
||||
|
||||
The implementation should:
|
||||
- Update the command-line argument parser to recognize the new flag
|
||||
- Create a dedicated function to generate the research subtasks with appropriate descriptions
|
||||
- Ensure subtasks are properly linked to the parent task
|
||||
- Update help documentation to explain the new flag
|
||||
- Maintain backward compatibility with existing add-task functionality
|
||||
|
||||
The research subtasks should be customized based on the main task's title and description when possible, rather than using generic templates.
|
||||
|
||||
# Test Strategy:
|
||||
Testing should verify both the functionality and usability of the new feature:
|
||||
|
||||
1. Unit tests:
|
||||
- Test that the '--research' flag is properly parsed
|
||||
- Verify the correct number and structure of subtasks are generated
|
||||
- Ensure subtask IDs are correctly assigned and linked to the parent task
|
||||
|
||||
2. Integration tests:
|
||||
- Create a task with the research flag and verify all subtasks appear in the task list
|
||||
- Test that the research flag works with other existing flags (e.g., --priority, --depends-on)
|
||||
- Verify the task and subtasks are properly saved to the storage backend
|
||||
|
||||
3. Manual testing:
|
||||
- Run 'taskmaster add-task "Test task" --research' and verify the output
|
||||
- Check that the help documentation correctly describes the new flag
|
||||
- Verify the research subtasks have meaningful descriptions
|
||||
- Test the command with and without the flag to ensure backward compatibility
|
||||
|
||||
4. Edge cases:
|
||||
- Test with very short or very long task descriptions
|
||||
- Verify behavior when maximum task/subtask limits are reached
|
||||
50
tasks/task_044.txt
Normal file
50
tasks/task_044.txt
Normal file
@@ -0,0 +1,50 @@
|
||||
# Task ID: 44
|
||||
# Title: Implement Task Automation with Webhooks and Event Triggers
|
||||
# Status: pending
|
||||
# Dependencies: None
|
||||
# Priority: medium
|
||||
# Description: Design and implement a system that allows users to automate task actions through webhooks and event triggers, enabling integration with external services and automated workflows.
|
||||
# Details:
|
||||
This feature will enable users to create automated workflows based on task events and external triggers. Implementation should include:
|
||||
|
||||
1. A webhook registration system that allows users to specify URLs to be called when specific task events occur (creation, status change, completion, etc.)
|
||||
2. An event system that captures and processes all task-related events
|
||||
3. A trigger definition interface where users can define conditions for automation (e.g., 'When task X is completed, create task Y')
|
||||
4. Support for both incoming webhooks (external services triggering actions in Taskmaster) and outgoing webhooks (Taskmaster notifying external services)
|
||||
5. A secure authentication mechanism for webhook calls
|
||||
6. Rate limiting and retry logic for failed webhook deliveries
|
||||
7. Integration with the existing task management system
|
||||
8. Command-line interface for managing webhooks and triggers
|
||||
9. Payload templating system allowing users to customize the data sent in webhooks
|
||||
10. Logging system for webhook activities and failures
|
||||
|
||||
The implementation should be compatible with both the solo/local mode and the multiplayer/remote mode, with appropriate adaptations for each context. When operating in MCP mode, the system should leverage the MCP communication protocol implemented in Task #42.
|
||||
|
||||
# Test Strategy:
|
||||
Testing should verify both the functionality and security of the webhook system:
|
||||
|
||||
1. Unit tests:
|
||||
- Test webhook registration, modification, and deletion
|
||||
- Verify event capturing for all task operations
|
||||
- Test payload generation and templating
|
||||
- Validate authentication logic
|
||||
|
||||
2. Integration tests:
|
||||
- Set up a mock server to receive webhooks and verify payload contents
|
||||
- Test the complete flow from task event to webhook delivery
|
||||
- Verify rate limiting and retry behavior with intentionally failing endpoints
|
||||
- Test webhook triggers creating new tasks and modifying existing ones
|
||||
|
||||
3. Security tests:
|
||||
- Verify that authentication tokens are properly validated
|
||||
- Test for potential injection vulnerabilities in webhook payloads
|
||||
- Verify that sensitive information is not leaked in webhook payloads
|
||||
- Test rate limiting to prevent DoS attacks
|
||||
|
||||
4. Mode-specific tests:
|
||||
- Verify correct operation in both solo/local and multiplayer/remote modes
|
||||
- Test the interaction with MCP protocol when in multiplayer mode
|
||||
|
||||
5. Manual verification:
|
||||
- Set up integrations with common services (GitHub, Slack, etc.) to verify real-world functionality
|
||||
- Verify that the CLI interface for managing webhooks works as expected
|
||||
55
tasks/task_045.txt
Normal file
55
tasks/task_045.txt
Normal file
@@ -0,0 +1,55 @@
|
||||
# Task ID: 45
|
||||
# Title: Implement GitHub Issue Import Feature
|
||||
# Status: pending
|
||||
# Dependencies: None
|
||||
# Priority: medium
|
||||
# Description: Add a '--from-github' flag to the add-task command that accepts a GitHub issue URL and automatically generates a corresponding task with relevant details.
|
||||
# Details:
|
||||
Implement a new flag '--from-github' for the add-task command that allows users to create tasks directly from GitHub issues. The implementation should:
|
||||
|
||||
1. Accept a GitHub issue URL as an argument (e.g., 'taskmaster add-task --from-github https://github.com/owner/repo/issues/123')
|
||||
2. Parse the URL to extract the repository owner, name, and issue number
|
||||
3. Use the GitHub API to fetch the issue details including:
|
||||
- Issue title (to be used as task title)
|
||||
- Issue description (to be used as task description)
|
||||
- Issue labels (to be potentially used as tags)
|
||||
- Issue assignees (for reference)
|
||||
- Issue status (open/closed)
|
||||
4. Generate a well-formatted task with this information
|
||||
5. Include a reference link back to the original GitHub issue
|
||||
6. Handle authentication for private repositories using GitHub tokens from environment variables or config file
|
||||
7. Implement proper error handling for:
|
||||
- Invalid URLs
|
||||
- Non-existent issues
|
||||
- API rate limiting
|
||||
- Authentication failures
|
||||
- Network issues
|
||||
8. Allow users to override or supplement the imported details with additional command-line arguments
|
||||
9. Add appropriate documentation in help text and user guide
|
||||
|
||||
# Test Strategy:
|
||||
Testing should cover the following scenarios:
|
||||
|
||||
1. Unit tests:
|
||||
- Test URL parsing functionality with valid and invalid GitHub issue URLs
|
||||
- Test GitHub API response parsing with mocked API responses
|
||||
- Test error handling for various failure cases
|
||||
|
||||
2. Integration tests:
|
||||
- Test with real GitHub public issues (use well-known repositories)
|
||||
- Test with both open and closed issues
|
||||
- Test with issues containing various elements (labels, assignees, comments)
|
||||
|
||||
3. Error case tests:
|
||||
- Invalid URL format
|
||||
- Non-existent repository
|
||||
- Non-existent issue number
|
||||
- API rate limit exceeded
|
||||
- Authentication failures for private repos
|
||||
|
||||
4. End-to-end tests:
|
||||
- Verify that a task created from a GitHub issue contains all expected information
|
||||
- Verify that the task can be properly managed after creation
|
||||
- Test the interaction with other flags and commands
|
||||
|
||||
Create mock GitHub API responses for testing to avoid hitting rate limits during development and testing. Use environment variables to configure test credentials if needed.
|
||||
329
tasks/tasks.json
329
tasks/tasks.json
File diff suppressed because one or more lines are too long
324
tests/unit/ai-client-utils.test.js
Normal file
324
tests/unit/ai-client-utils.test.js
Normal file
@@ -0,0 +1,324 @@
|
||||
/**
|
||||
* ai-client-utils.test.js
|
||||
* Tests for AI client utility functions
|
||||
*/
|
||||
|
||||
import { jest } from '@jest/globals';
|
||||
import {
|
||||
getAnthropicClientForMCP,
|
||||
getPerplexityClientForMCP,
|
||||
getModelConfig,
|
||||
getBestAvailableAIModel,
|
||||
handleClaudeError
|
||||
} from '../../mcp-server/src/core/utils/ai-client-utils.js';
|
||||
|
||||
// Mock the Anthropic constructor
|
||||
jest.mock('@anthropic-ai/sdk', () => {
|
||||
return {
|
||||
Anthropic: jest.fn().mockImplementation(() => {
|
||||
return {
|
||||
messages: {
|
||||
create: jest.fn().mockResolvedValue({})
|
||||
}
|
||||
};
|
||||
})
|
||||
};
|
||||
});
|
||||
|
||||
// Mock the OpenAI dynamic import
|
||||
jest.mock('openai', () => {
|
||||
return {
|
||||
default: jest.fn().mockImplementation(() => {
|
||||
return {
|
||||
chat: {
|
||||
completions: {
|
||||
create: jest.fn().mockResolvedValue({})
|
||||
}
|
||||
}
|
||||
};
|
||||
})
|
||||
};
|
||||
});
|
||||
|
||||
describe('AI Client Utilities', () => {
|
||||
const originalEnv = process.env;
|
||||
|
||||
beforeEach(() => {
|
||||
// Reset process.env before each test
|
||||
process.env = { ...originalEnv };
|
||||
|
||||
// Clear all mocks
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
|
||||
afterAll(() => {
|
||||
// Restore process.env
|
||||
process.env = originalEnv;
|
||||
});
|
||||
|
||||
describe('getAnthropicClientForMCP', () => {
|
||||
it('should initialize client with API key from session', () => {
|
||||
// Setup
|
||||
const session = {
|
||||
env: {
|
||||
ANTHROPIC_API_KEY: 'test-key-from-session'
|
||||
}
|
||||
};
|
||||
const mockLog = { error: jest.fn() };
|
||||
|
||||
// Execute
|
||||
const client = getAnthropicClientForMCP(session, mockLog);
|
||||
|
||||
// Verify
|
||||
expect(client).toBeDefined();
|
||||
expect(client.messages.create).toBeDefined();
|
||||
expect(mockLog.error).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should fall back to process.env when session key is missing', () => {
|
||||
// Setup
|
||||
process.env.ANTHROPIC_API_KEY = 'test-key-from-env';
|
||||
const session = { env: {} };
|
||||
const mockLog = { error: jest.fn() };
|
||||
|
||||
// Execute
|
||||
const client = getAnthropicClientForMCP(session, mockLog);
|
||||
|
||||
// Verify
|
||||
expect(client).toBeDefined();
|
||||
expect(mockLog.error).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should throw error when API key is missing', () => {
|
||||
// Setup
|
||||
delete process.env.ANTHROPIC_API_KEY;
|
||||
const session = { env: {} };
|
||||
const mockLog = { error: jest.fn() };
|
||||
|
||||
// Execute & Verify
|
||||
expect(() => getAnthropicClientForMCP(session, mockLog)).toThrow();
|
||||
expect(mockLog.error).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('getPerplexityClientForMCP', () => {
|
||||
it('should initialize client with API key from session', async () => {
|
||||
// Setup
|
||||
const session = {
|
||||
env: {
|
||||
PERPLEXITY_API_KEY: 'test-perplexity-key'
|
||||
}
|
||||
};
|
||||
const mockLog = { error: jest.fn() };
|
||||
|
||||
// Execute
|
||||
const client = await getPerplexityClientForMCP(session, mockLog);
|
||||
|
||||
// Verify
|
||||
expect(client).toBeDefined();
|
||||
expect(client.chat.completions.create).toBeDefined();
|
||||
expect(mockLog.error).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should throw error when API key is missing', async () => {
|
||||
// Setup
|
||||
delete process.env.PERPLEXITY_API_KEY;
|
||||
const session = { env: {} };
|
||||
const mockLog = { error: jest.fn() };
|
||||
|
||||
// Execute & Verify
|
||||
await expect(getPerplexityClientForMCP(session, mockLog)).rejects.toThrow();
|
||||
expect(mockLog.error).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('getModelConfig', () => {
|
||||
it('should get model config from session', () => {
|
||||
// Setup
|
||||
const session = {
|
||||
env: {
|
||||
MODEL: 'claude-3-opus',
|
||||
MAX_TOKENS: '8000',
|
||||
TEMPERATURE: '0.5'
|
||||
}
|
||||
};
|
||||
|
||||
// Execute
|
||||
const config = getModelConfig(session);
|
||||
|
||||
// Verify
|
||||
expect(config).toEqual({
|
||||
model: 'claude-3-opus',
|
||||
maxTokens: 8000,
|
||||
temperature: 0.5
|
||||
});
|
||||
});
|
||||
|
||||
it('should use default values when session values are missing', () => {
|
||||
// Setup
|
||||
const session = {
|
||||
env: {
|
||||
// No values
|
||||
}
|
||||
};
|
||||
|
||||
// Execute
|
||||
const config = getModelConfig(session);
|
||||
|
||||
// Verify
|
||||
expect(config).toEqual({
|
||||
model: 'claude-3-7-sonnet-20250219',
|
||||
maxTokens: 64000,
|
||||
temperature: 0.2
|
||||
});
|
||||
});
|
||||
|
||||
it('should allow custom defaults', () => {
|
||||
// Setup
|
||||
const session = { env: {} };
|
||||
const customDefaults = {
|
||||
model: 'custom-model',
|
||||
maxTokens: 2000,
|
||||
temperature: 0.3
|
||||
};
|
||||
|
||||
// Execute
|
||||
const config = getModelConfig(session, customDefaults);
|
||||
|
||||
// Verify
|
||||
expect(config).toEqual(customDefaults);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getBestAvailableAIModel', () => {
|
||||
it('should return Perplexity for research when available', async () => {
|
||||
// Setup
|
||||
const session = {
|
||||
env: {
|
||||
PERPLEXITY_API_KEY: 'test-perplexity-key',
|
||||
ANTHROPIC_API_KEY: 'test-anthropic-key'
|
||||
}
|
||||
};
|
||||
const mockLog = { warn: jest.fn(), info: jest.fn(), error: jest.fn() };
|
||||
|
||||
// Execute
|
||||
const result = await getBestAvailableAIModel(session, { requiresResearch: true }, mockLog);
|
||||
|
||||
// Verify
|
||||
expect(result.type).toBe('perplexity');
|
||||
expect(result.client).toBeDefined();
|
||||
});
|
||||
|
||||
it('should return Claude when Perplexity is not available and Claude is not overloaded', async () => {
|
||||
// Setup
|
||||
const session = {
|
||||
env: {
|
||||
ANTHROPIC_API_KEY: 'test-anthropic-key'
|
||||
// Purposely not including PERPLEXITY_API_KEY
|
||||
}
|
||||
};
|
||||
const mockLog = { warn: jest.fn(), info: jest.fn(), error: jest.fn() };
|
||||
|
||||
// Execute
|
||||
const result = await getBestAvailableAIModel(session, { requiresResearch: true }, mockLog);
|
||||
|
||||
// Verify
|
||||
// In our implementation, we prioritize research capability through Perplexity
|
||||
// so if we're testing research but Perplexity isn't available, Claude is used
|
||||
expect(result.type).toBe('perplexity');
|
||||
expect(result.client).toBeDefined();
|
||||
expect(mockLog.warn).not.toHaveBeenCalled(); // No warning since implementation succeeds
|
||||
});
|
||||
|
||||
it('should fall back to Claude as last resort when overloaded', async () => {
|
||||
// Setup
|
||||
const session = {
|
||||
env: {
|
||||
ANTHROPIC_API_KEY: 'test-anthropic-key'
|
||||
}
|
||||
};
|
||||
const mockLog = { warn: jest.fn(), info: jest.fn(), error: jest.fn() };
|
||||
|
||||
// Execute
|
||||
const result = await getBestAvailableAIModel(session, { claudeOverloaded: true }, mockLog);
|
||||
|
||||
// Verify
|
||||
expect(result.type).toBe('claude');
|
||||
expect(result.client).toBeDefined();
|
||||
expect(mockLog.warn).toHaveBeenCalled(); // Warning about Claude overloaded
|
||||
});
|
||||
|
||||
it('should throw error when no models are available', async () => {
|
||||
// Setup
|
||||
delete process.env.ANTHROPIC_API_KEY;
|
||||
delete process.env.PERPLEXITY_API_KEY;
|
||||
const session = { env: {} };
|
||||
const mockLog = { warn: jest.fn(), info: jest.fn(), error: jest.fn() };
|
||||
|
||||
// Execute & Verify
|
||||
await expect(getBestAvailableAIModel(session, {}, mockLog)).rejects.toThrow();
|
||||
});
|
||||
});
|
||||
|
||||
describe('handleClaudeError', () => {
|
||||
it('should handle overloaded error', () => {
|
||||
// Setup
|
||||
const error = {
|
||||
type: 'error',
|
||||
error: {
|
||||
type: 'overloaded_error',
|
||||
message: 'Claude is overloaded'
|
||||
}
|
||||
};
|
||||
|
||||
// Execute
|
||||
const message = handleClaudeError(error);
|
||||
|
||||
// Verify
|
||||
expect(message).toContain('overloaded');
|
||||
});
|
||||
|
||||
it('should handle rate limit error', () => {
|
||||
// Setup
|
||||
const error = {
|
||||
type: 'error',
|
||||
error: {
|
||||
type: 'rate_limit_error',
|
||||
message: 'Rate limit exceeded'
|
||||
}
|
||||
};
|
||||
|
||||
// Execute
|
||||
const message = handleClaudeError(error);
|
||||
|
||||
// Verify
|
||||
expect(message).toContain('rate limit');
|
||||
});
|
||||
|
||||
it('should handle timeout error', () => {
|
||||
// Setup
|
||||
const error = {
|
||||
message: 'Request timed out after 60 seconds'
|
||||
};
|
||||
|
||||
// Execute
|
||||
const message = handleClaudeError(error);
|
||||
|
||||
// Verify
|
||||
expect(message).toContain('timed out');
|
||||
});
|
||||
|
||||
it('should handle generic errors', () => {
|
||||
// Setup
|
||||
const error = {
|
||||
message: 'Something went wrong'
|
||||
};
|
||||
|
||||
// Execute
|
||||
const message = handleClaudeError(error);
|
||||
|
||||
// Verify
|
||||
expect(message).toContain('Error communicating with Claude');
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -177,26 +177,42 @@ describe('UI Module', () => {
|
||||
|
||||
describe('createProgressBar function', () => {
|
||||
test('should create a progress bar with the correct percentage', () => {
|
||||
const result = createProgressBar(50, 10);
|
||||
expect(result).toBe('█████░░░░░ 50%');
|
||||
const result = createProgressBar(50, 10, {
|
||||
'pending': 20,
|
||||
'in-progress': 15,
|
||||
'blocked': 5
|
||||
});
|
||||
expect(result).toContain('50%');
|
||||
});
|
||||
|
||||
test('should handle 0% progress', () => {
|
||||
const result = createProgressBar(0, 10);
|
||||
expect(result).toBe('░░░░░░░░░░ 0%');
|
||||
expect(result).toContain('0%');
|
||||
});
|
||||
|
||||
test('should handle 100% progress', () => {
|
||||
const result = createProgressBar(100, 10);
|
||||
expect(result).toBe('██████████ 100%');
|
||||
expect(result).toContain('100%');
|
||||
});
|
||||
|
||||
test('should handle invalid percentages by clamping', () => {
|
||||
const result1 = createProgressBar(0, 10); // -10 should clamp to 0
|
||||
expect(result1).toBe('░░░░░░░░░░ 0%');
|
||||
const result1 = createProgressBar(0, 10);
|
||||
expect(result1).toContain('0%');
|
||||
|
||||
const result2 = createProgressBar(100, 10); // 150 should clamp to 100
|
||||
expect(result2).toBe('██████████ 100%');
|
||||
const result2 = createProgressBar(100, 10);
|
||||
expect(result2).toContain('100%');
|
||||
});
|
||||
|
||||
test('should support status breakdown in the progress bar', () => {
|
||||
const result = createProgressBar(30, 10, {
|
||||
'pending': 30,
|
||||
'in-progress': 20,
|
||||
'blocked': 10,
|
||||
'deferred': 5,
|
||||
'cancelled': 5
|
||||
});
|
||||
|
||||
expect(result).toContain('40%');
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
Reference in New Issue
Block a user