Compare commits
66 Commits
v0.12.1...recovered-
| SHA1 |
|---|
| f37ef2c3a3 |
| 887d9bffa7 |
| ac0c2e3854 |
| 3628951120 |
| 9620e032ac |
| 597a2f7494 |
| b8362bbbe7 |
| c280f963fe |
| 880a98e8e2 |
| a8538b2e9c |
| 7c7f205350 |
| 7f214b76d7 |
| 684ae52542 |
| 4ae97f145e |
| b684753a35 |
| bda54f3296 |
| ab38a48599 |
| e519a832f6 |
| 433c5df414 |
| 225a0781e9 |
| 52adb5c2f6 |
| 9869ebe045 |
| 40ed37b166 |
| 3cdaff6c66 |
| 6161febbde |
| 281c476738 |
| 78840a1f45 |
| 6bbc1b4499 |
| 65e0fcc328 |
| e90f822bdd |
| 22bd13c197 |
| 059ce5e716 |
| 38a2805dd8 |
| 036a7bd2d3 |
| b58badec36 |
| f7970a542e |
| ac6b0a3f14 |
| 6f87faa9dc |
| c58d4b51ef |
| 9730576a03 |
| a6a94e3a18 |
| 38c368a745 |
| f032116961 |
| c274c77aa7 |
| 1c72c88a32 |
| f007df06d8 |
| 6481f725aa |
| a3abf194ad |
| 0b6207c882 |
| 1bb1309ef8 |
| 5296e50b6a |
| b2b1a1ef8f |
| 20d04b243b |
| 7cd94959b9 |
| 407a4e880d |
| d822dc08fe |
| 5914771636 |
| 9d1ec10c34 |
| 7d90d6808d |
| 14a3512325 |
| a186cb43e3 |
| 74dcf3b5f4 |
| a588098fca |
| 99426d9bb1 |
| 151c31e550 |
| 26a37d28ce |
.changeset/slick-women-relate.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Add license to repo
.changeset/two-bats-smoke.md (new file, 126 lines)
@@ -0,0 +1,126 @@
---
"task-master-ai": patch
---

- Adjusts the MCP server invocation in the mcp.json we ship with `task-master init`. Fully functional now.
- Rename the npx -y command. It's now `npx -y task-master-ai task-master-mcp`
- Rename MCP tools to better align with API conventions and natural language in client chat:
  - Rename `list-tasks` to `get-tasks` for more intuitive client requests like "get my tasks"
  - Rename `show-task` to `get-task` for consistency with GET-based API naming conventions

- **Optimize MCP response payloads:**
  - Add a custom `processTaskResponse` function to the `get-task` MCP tool to filter out unnecessary `allTasks` array data
  - Significantly reduce response size by returning only the specific requested task instead of all tasks
  - Preserve dependency status relationships for the UI/CLI while keeping MCP responses lean and efficient (a rough sketch follows this list)
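
As a rough illustration of the payload trimming described above, here is a minimal sketch (the function name `processTaskResponse` comes from the bullet above, but the exact response shape and filtering logic are assumptions, not the project's actual implementation):

```javascript
// Hypothetical sketch: strip the bulky `allTasks` array from a get-task response,
// keeping only the requested task plus lightweight dependency status information.
function processTaskResponse(data) {
  if (!data || !data.task) return data;
  const allTasks = data.allTasks || [];
  return {
    task: data.task,
    // Keep id/status pairs for dependencies instead of full task objects.
    dependencyStatuses: (data.task.dependencies || []).map((depId) => {
      const dep = allTasks.find((t) => t.id === depId);
      return { id: depId, status: dep ? dep.status : 'unknown' };
    })
  };
}
```
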
- **Implement complete remove-task functionality:**
  - Add `removeTask` core function to permanently delete tasks or subtasks from tasks.json
  - Implement CLI command `remove-task` with confirmation prompt and force flag support
  - Create MCP `remove_task` tool for AI-assisted task removal
  - Automatically handle dependency cleanup by removing references to deleted tasks
  - Update task files after removal to maintain consistency
  - Provide robust error handling and detailed feedback messages

- **Update Cursor rules and documentation:**
  - Enhance `new_features.mdc` with comprehensive guidelines for implementing removal commands
  - Update `commands.mdc` with best practices for confirmation flows and cleanup procedures
  - Expand `mcp.mdc` with detailed instructions for MCP tool implementation patterns
  - Add examples of proper error handling and parameter validation to all relevant rules
  - Include new sections about handling dependencies during task removal operations
  - Document naming conventions and implementation patterns for destructive operations

- **Implement silent mode across all direct functions:**
  - Add `enableSilentMode` and `disableSilentMode` utility imports to all direct function files
  - Wrap all core function calls with silent mode to prevent console logs from interfering with JSON responses
  - Add comprehensive error handling to ensure silent mode is disabled even when errors occur
  - Fix `Unexpected token 'I', "[INFO] Gene"... is not valid JSON` errors by suppressing log output
  - Apply consistent silent mode pattern across all MCP direct functions
  - Maintain clean JSON responses for better integration with client tools

- **Implement AsyncOperationManager for background task processing** (a rough sketch follows this list):
  - Add new `async-manager.js` module to handle long-running operations asynchronously
  - Support background execution of computationally intensive tasks like expansion and analysis
  - Implement unique operation IDs with UUID generation for reliable tracking
  - Add operation status tracking (pending, running, completed, failed)
  - Create `get_operation_status` MCP tool to check on background task progress
  - Forward progress reporting from background tasks to the client
  - Implement operation history with automatic cleanup of completed operations
  - Support proper error handling in background tasks with detailed status reporting
  - Maintain context (log, session) for background operations, ensuring consistent behavior
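
The rough sketch referenced above: a minimal operation manager along the lines described (illustrative only; the real `async-manager.js`, its method names, and its cleanup behavior may differ):

```javascript
import { randomUUID } from 'crypto';

// Hypothetical sketch: track background operations by UUID and expose their
// status so a get_operation_status tool can poll for progress.
class AsyncOperationManager {
  constructor() {
    this.operations = new Map();
  }

  start(fn) {
    const id = randomUUID();
    this.operations.set(id, { id, status: 'pending', result: null, error: null });
    // Run the operation in the background and record its outcome.
    Promise.resolve()
      .then(() => {
        this.operations.get(id).status = 'running';
        return fn();
      })
      .then((result) => Object.assign(this.operations.get(id), { status: 'completed', result }))
      .catch((error) => Object.assign(this.operations.get(id), { status: 'failed', error: error.message }));
    return id;
  }

  getStatus(id) {
    return this.operations.get(id) || { id, status: 'not_found' };
  }
}
```
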
- **Implement initialize_project command:**
  - Add new MCP tool to allow project setup via integrated MCP clients
  - Create `initialize_project` direct function with proper parameter handling
  - Improve the onboarding experience by adding it to the mcp.json configuration
  - Support project-specific metadata like name, description, and version
  - Handle shell alias creation with proper confirmation
  - Improve the first-time user experience in AI environments

- **Refactor project root handling for MCP Server:**
  - **Prioritize Session Roots**: MCP tools now extract the project root path directly from `session.roots[0].uri` provided by the client (e.g., Cursor).
  - **New Utility `getProjectRootFromSession`**: Added to `mcp-server/src/tools/utils.js` to encapsulate session root extraction and decoding. **Further refined for more reliable detection, especially in integrated environments, including deriving the root from the script path and avoiding a fallback to '/'.**
  - **Simplify `findTasksJsonPath`**: The core path-finding utility in `mcp-server/src/core/utils/path-utils.js` now prioritizes the `projectRoot` passed in `args` (originating from the session). Removed checks for the `TASK_MASTER_PROJECT_ROOT` env var (no longer used) and the package directory fallback. **Enhanced error handling to include detailed debug information (paths searched, CWD, server dir, etc.) and clearer potential solutions when `tasks.json` is not found.**
  - **Retain CLI Fallbacks**: Kept the `lastFoundProjectRoot` cache check and CWD search in `findTasksJsonPath` for compatibility with direct CLI usage.

- Updated all MCP tools to use the new project root handling (a sketch of the session-root helper follows this list):
  - Tools now call `getProjectRootFromSession` to determine the root.
  - This root is passed explicitly as `projectRoot` in the `args` object to the corresponding `*Direct` function.
  - Direct functions continue to use the (now simplified) `findTasksJsonPath` to locate `tasks.json` within the provided root.
  - This ensures tools work reliably in integrated environments without requiring the user to specify `--project-root`.
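
The sketch referenced above: a minimal version of the session-root helper described in these bullets (an assumption about the implementation; the real `getProjectRootFromSession` in `mcp-server/src/tools/utils.js` handles more edge cases, such as deriving the root from the script path):

```javascript
import { fileURLToPath } from 'url';

// Hypothetical sketch: derive the project root from the MCP session's roots,
// decoding a file:// URI if the client (e.g. Cursor) provides one.
function getProjectRootFromSession(session, log) {
  const rootUri = session?.roots?.[0]?.uri;
  if (!rootUri) {
    log.warn('No session roots provided; falling back to the current working directory.');
    return process.cwd();
  }
  const root = rootUri.startsWith('file://') ? fileURLToPath(rootUri) : rootUri;
  log.info(`Resolved project root from session: ${root}`);
  return root;
}
```
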
- Add a comprehensive PROJECT_MARKERS array for detecting common project files (used in CLI fallback logic).
- Improve error messages with specific troubleshooting guidance.
- **Enhanced logging:**
  - Indicate the source of project root selection more clearly.
  - **Add verbose logging in `get-task.js` to trace session object content and the resolved project root path, aiding debugging.**

- DRY refactoring by centralizing path utilities in `core/utils/path-utils.js` and session handling in `tools/utils.js`.
- Keep caching of `lastFoundProjectRoot` for CLI performance (a sketch of the lookup order follows below).
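
The sketch referenced above: a simplified view of the lookup order described in these bullets (assumptions: the real `findTasksJsonPath` in `mcp-server/src/core/utils/path-utils.js` checks more project markers and produces richer error details):

```javascript
import fs from 'fs';
import path from 'path';

// Module-level cache kept for CLI performance, as described above.
let lastFoundProjectRoot = null;

// Hypothetical sketch: prefer the session-provided projectRoot in args,
// then the cached root, then the current working directory.
function findTasksJsonPath(args, log) {
  const candidates = [args.projectRoot, lastFoundProjectRoot, process.cwd()].filter(Boolean);
  for (const root of candidates) {
    const tasksPath = path.join(root, 'tasks', 'tasks.json');
    if (fs.existsSync(tasksPath)) {
      lastFoundProjectRoot = root;
      log.info(`Found tasks.json at ${tasksPath}`);
      return tasksPath;
    }
  }
  throw new Error(
    `tasks.json not found. Paths searched: ${candidates.join(', ')} (CWD: ${process.cwd()})`
  );
}
```
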
- Split the monolithic task-master-core.js into separate function files within the direct-functions directory.
- Implement update-task MCP command for updating a single task by ID.
- Implement update-subtask MCP command for appending information to specific subtasks.
- Implement generate MCP command for creating individual task files from tasks.json.
- Implement set-status MCP command for updating task status.
- Implement get-task MCP command for displaying detailed task information (renamed from show-task).
- Implement next-task MCP command for finding the next task to work on.
- Implement expand-task MCP command for breaking down tasks into subtasks.
- Implement add-task MCP command for creating new tasks using AI assistance.
- Implement add-subtask MCP command for adding subtasks to existing tasks.
- Implement remove-subtask MCP command for removing subtasks from parent tasks.
- Implement expand-all MCP command for expanding all tasks into subtasks.
- Implement analyze-complexity MCP command for analyzing task complexity.
- Implement clear-subtasks MCP command for clearing subtasks from parent tasks.
- Implement remove-dependency MCP command for removing dependencies from tasks.
- Implement validate-dependencies MCP command for checking the validity of task dependencies.
- Implement fix-dependencies MCP command for automatically fixing invalid dependencies.
- Implement complexity-report MCP command for displaying task complexity analysis reports.
- Implement add-dependency MCP command for creating dependency relationships between tasks.
- Implement get-tasks MCP command for listing all tasks (renamed from list-tasks).
- Implement the `initialize_project` MCP tool to allow project setup via an MCP client (e.g., Cursor) and radically improve and simplify onboarding by adding it to mcp.json.

- Enhance documentation and tool descriptions:
  - Create a new `taskmaster.mdc` Cursor rule for a comprehensive MCP tool and CLI command reference.
  - Bundle taskmaster.mdc with the npm package and include it in project initialization.
  - Add detailed descriptions for each tool's purpose, parameters, and common use cases.
  - Include natural language patterns and keywords for better intent recognition.
  - Document parameter descriptions with clear examples and default values.
  - Add usage examples and context for each command/tool.
  - **Update documentation (`mcp.mdc`, `utilities.mdc`, `architecture.mdc`, `new_features.mdc`, `commands.mdc`) to reflect the new session-based project root handling and the preferred MCP vs. CLI interaction model.**
  - Improve clarity around project root auto-detection in tool documentation.
  - Update tool descriptions to better reflect their actual behavior and capabilities.
  - Add cross-references between related tools and commands.
  - Include troubleshooting guidance in tool descriptions.
  - **Add default values for `DEFAULT_SUBTASKS` and `DEFAULT_PRIORITY` to the example `.cursor/mcp.json` configuration.**

- Document MCP server naming conventions in the architecture.mdc and mcp.mdc files (file names use kebab-case, direct functions use camelCase with a `Direct` suffix, tool registration functions use camelCase with a `Tool` suffix, and MCP tool names use snake_case).
- Update MCP tool naming to follow more intuitive conventions that better align with natural language requests in client chat applications.
- Enhance the task show view with a color-coded progress bar for visualizing subtask completion percentage.
- Add a "cancelled" status to the UI module status configurations for marking tasks as cancelled without deletion.
- Improve MCP server resource documentation with comprehensive implementation examples and best practices.
- Enhance progress bars with a status breakdown visualization showing proportional sections for different task statuses.
- Add improved status tracking for both tasks and subtasks with detailed counts by status.
- Optimize progress bar display with width constraints to prevent UI overflow on smaller terminals.
- Improve the status counts display with clear text labels beside status icons for better readability.
- Treat deferred and cancelled tasks as effectively complete for progress calculation while maintaining a visual distinction.
- **Fix `reportProgress` calls** to use the correct `{ progress, total? }` format (a small usage sketch follows below).
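
A small usage sketch for the `reportProgress` shape mentioned in the last bullet (assuming only that the callback accepts a `progress` count and an optional `total`):

```javascript
// Hypothetical sketch: report progress as { progress, total? } rather than a bare number.
async function expandAllWithProgress(tasks, reportProgress) {
  for (let i = 0; i < tasks.length; i++) {
    // ... expand tasks[i] here ...
    await reportProgress({ progress: i + 1, total: tasks.length });
  }
}
```
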
@@ -1,18 +1,20 @@
 {
   "mcpServers": {
     "taskmaster-ai": {
       "command": "node",
-      "args": ["./mcp-server/server.js"],
+      "args": [
+        "./mcp-server/server.js"
+      ],
       "env": {
         "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
         "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
         "MODEL": "claude-3-7-sonnet-20250219",
         "PERPLEXITY_MODEL": "sonar-pro",
         "MAX_TOKENS": 64000,
-        "TEMPERATURE": 0.2,
+        "TEMPERATURE": 0.4,
         "DEFAULT_SUBTASKS": 5,
         "DEFAULT_PRIORITY": "medium"
       }
     }
   }
 }
@@ -14,13 +14,13 @@ alwaysApply: false
 - **Purpose**: Defines and registers all CLI commands using Commander.js.
 - **Responsibilities** (See also: [`commands.mdc`](mdc:.cursor/rules/commands.mdc)):
   - Parses command-line arguments and options.
-  - Invokes appropriate functions from other modules to execute commands (e.g., calls `initializeProject` from `init.js` for the `init` command).
+  - Invokes appropriate functions from other modules to execute commands.
   - Handles user input and output related to command execution.
   - Implements input validation and error handling for CLI commands.
 - **Key Components**:
   - `programInstance` (Commander.js `Command` instance): Manages command definitions.
   - `registerCommands(programInstance)`: Function to register all application commands.
-  - Command action handlers: Functions executed when a specific command is invoked, delegating to core modules.
+  - Command action handlers: Functions executed when a specific command is invoked.

 - **[`task-manager.js`](mdc:scripts/modules/task-manager.js): Task Data Management**
   - **Purpose**: Manages task data, including loading, saving, creating, updating, deleting, and querying tasks.
@@ -148,134 +148,14 @@ alwaysApply: false
 - Robust error handling for background tasks
 - **Usage**: Used for CPU-intensive operations like task expansion and PRD parsing

-- **[`init.js`](mdc:scripts/init.js): Project Initialization Logic**
-  - **Purpose**: Contains the core logic for setting up a new Task Master project structure.
-  - **Responsibilities**:
-    - Creates necessary directories (`.cursor/rules`, `scripts`, `tasks`).
-    - Copies template files (`.env.example`, `.gitignore`, rule files, `dev.js`, etc.).
-    - Creates or merges `package.json` with required dependencies and scripts.
-    - Sets up MCP configuration (`.cursor/mcp.json`).
-    - Optionally initializes a git repository and installs dependencies.
-    - Handles user prompts for project details *if* called without skip flags (`-y`).
-  - **Key Function**:
-    - `initializeProject(options)`: The main function exported and called by the `init` command's action handler in [`commands.js`](mdc:scripts/modules/commands.js). It receives parsed options directly.
-  - **Note**: This script is used as a module and no longer handles its own argument parsing or direct execution via a separate `bin` file.
-
 - **Data Flow and Module Dependencies**:

-  - **Commands Initiate Actions**: User commands entered via the CLI (parsed by `commander` based on definitions in [`commands.js`](mdc:scripts/modules/commands.js)) are the entry points for most operations.
+  - **Commands Initiate Actions**: User commands entered via the CLI (handled by [`commands.js`](mdc:scripts/modules/commands.js)) are the entry points for most operations.
-  - **Command Handlers Delegate to Core Logic**: Action handlers within [`commands.js`](mdc:scripts/modules/commands.js) call functions in core modules like [`task-manager.js`](mdc:scripts/modules/task-manager.js), [`dependency-manager.js`](mdc:scripts/modules/dependency-manager.js), and [`init.js`](mdc:scripts/init.js) (for the `init` command) to perform the actual work.
+  - **Command Handlers Delegate to Managers**: Command handlers in [`commands.js`](mdc:scripts/modules/commands.js) call functions in [`task-manager.js`](mdc:scripts/modules/task-manager.js) and [`dependency-manager.js`](mdc:scripts/modules/dependency-manager.js) to perform core task and dependency management logic.
   - **UI for Presentation**: [`ui.js`](mdc:scripts/modules/ui.js) is used by command handlers and task/dependency managers to display information to the user. UI functions primarily consume data and format it for output, without modifying core application state.
   - **Utilities for Common Tasks**: [`utils.js`](mdc:scripts/modules/utils.js) provides helper functions used by all other modules for configuration, logging, file operations, and common data manipulations.
   - **AI Services Integration**: AI functionalities (complexity analysis, task expansion, PRD parsing) are invoked from [`task-manager.js`](mdc:scripts/modules/task-manager.js) and potentially [`commands.js`](mdc:scripts/modules/commands.js), likely using functions that would reside in a dedicated `ai-services.js` module or be integrated within `utils.js` or `task-manager.js`.
-  - **MCP Server Interaction**: External tools interact with the `mcp-server`. MCP Tool `execute` methods use `getProjectRootFromSession` to find the project root, then call direct function wrappers (in `mcp-server/src/core/direct-functions/`) passing the root in `args`. These wrappers handle path finding for `tasks.json` (using `path-utils.js`), validation, caching, call the core logic from `scripts/modules/` (passing logging context via the standard wrapper pattern detailed in mcp.mdc), and return a standardized result. The final MCP response is formatted by `mcp-server/src/tools/utils.js`. See [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) for details.
+  - **MCP Server Interaction**: External tools interact with the `mcp-server`. MCP Tool `execute` methods use `getProjectRootFromSession` to find the project root, then call direct function wrappers (in `mcp-server/src/core/direct-functions/`) passing the root in `args`. These wrappers handle path finding for `tasks.json` (using `path-utils.js`), validation, caching, call the core logic from `scripts/modules/`, and return a standardized result. The final MCP response is formatted by `mcp-server/src/tools/utils.js`. See [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) for details.

-## Silent Mode Implementation Pattern in MCP Direct Functions
-
-Direct functions (the `*Direct` functions in `mcp-server/src/core/direct-functions/`) need to carefully implement silent mode to prevent console logs from interfering with the structured JSON responses required by MCP. This involves both using `enableSilentMode`/`disableSilentMode` around core function calls AND passing the MCP logger via the standard wrapper pattern (see mcp.mdc). Here's the standard pattern for correct implementation:
-
-1. **Import Silent Mode Utilities**:
-   ```javascript
-   import { enableSilentMode, disableSilentMode, isSilentMode } from '../../../../scripts/modules/utils.js';
-   ```
-
-2. **Parameter Matching with Core Functions**:
-   - ✅ **DO**: Ensure direct function parameters match the core function parameters
-   - ✅ **DO**: Check the original core function signature before implementing
-   - ❌ **DON'T**: Add parameters to direct functions that don't exist in core functions
-   ```javascript
-   // Example: Core function signature
-   // async function expandTask(tasksPath, taskId, numSubtasks, useResearch, additionalContext, options)
-
-   // Direct function implementation - extract only parameters that exist in core
-   export async function expandTaskDirect(args, log, context = {}) {
-     // Extract parameters that match the core function
-     const taskId = parseInt(args.id, 10);
-     const numSubtasks = args.num ? parseInt(args.num, 10) : undefined;
-     const useResearch = args.research === true;
-     const additionalContext = args.prompt || '';
-
-     // Later pass these parameters in the correct order to the core function
-     const result = await expandTask(
-       tasksPath,
-       taskId,
-       numSubtasks,
-       useResearch,
-       additionalContext,
-       { mcpLog: log, session: context.session }
-     );
-   }
-   ```
-
-3. **Checking Silent Mode State**:
-   - ✅ **DO**: Always use `isSilentMode()` function to check current status
-   - ❌ **DON'T**: Directly access the global `silentMode` variable or `global.silentMode`
-   ```javascript
-   // CORRECT: Use the function to check current state
-   if (!isSilentMode()) {
-     // Only create a loading indicator if not in silent mode
-     loadingIndicator = startLoadingIndicator('Processing...');
-   }
-
-   // INCORRECT: Don't access global variables directly
-   if (!silentMode) { // ❌ WRONG
-     loadingIndicator = startLoadingIndicator('Processing...');
-   }
-   ```
-
-4. **Wrapping Core Function Calls**:
-   - ✅ **DO**: Use a try/finally block pattern to ensure silent mode is always restored
-   - ✅ **DO**: Enable silent mode before calling core functions that produce console output
-   - ✅ **DO**: Disable silent mode in a finally block to ensure it runs even if errors occur
-   - ❌ **DON'T**: Enable silent mode without ensuring it gets disabled
-   ```javascript
-   export async function someDirectFunction(args, log) {
-     try {
-       // Argument preparation
-       const tasksPath = findTasksJsonPath(args, log);
-       const someArg = args.someArg;
-
-       // Enable silent mode to prevent console logs
-       enableSilentMode();
-
-       try {
-         // Call core function which might produce console output
-         const result = await someCoreFunction(tasksPath, someArg);
-
-         // Return standardized result object
-         return {
-           success: true,
-           data: result,
-           fromCache: false
-         };
-       } finally {
-         // ALWAYS disable silent mode in finally block
-         disableSilentMode();
-       }
-     } catch (error) {
-       // Standard error handling
-       log.error(`Error in direct function: ${error.message}`);
-       return {
-         success: false,
-         error: { code: 'OPERATION_ERROR', message: error.message },
-         fromCache: false
-       };
-     }
-   }
-   ```
-
-5. **Mixed Parameter and Global Silent Mode Handling**:
-   - For functions that need to handle both a passed `silentMode` parameter and check global state:
-   ```javascript
-   // Check both the function parameter and global state
-   const isSilent = options.silentMode || (typeof options.silentMode === 'undefined' && isSilentMode());
-
-   if (!isSilent) {
-     console.log('Operation starting...');
-   }
-   ```
-
-By following these patterns consistently, direct functions will properly manage console output suppression while ensuring that silent mode is always properly reset, even when errors occur. This creates a more robust system that helps prevent unexpected silent mode states that could cause logging problems in subsequent operations.
-
 - **Testing Architecture**:

@@ -325,7 +205,7 @@ Follow these steps to add MCP support for an existing Task Master command (see [

 1. **Ensure Core Logic Exists**: Verify the core functionality is implemented and exported from the relevant module in `scripts/modules/`.

-2. **Create Direct Function File in `mcp-server/src/core/direct-functions/`:**
+2. **Create Direct Function File in `mcp-server/src/core/direct-functions/`**:
    - Create a new file (e.g., `your-command.js`) using **kebab-case** naming.
    - Import necessary core functions, **`findTasksJsonPath` from `../utils/path-utils.js`**, and **silent mode utilities**.
    - Implement `async function yourCommandDirect(args, log)` using **camelCase** with `Direct` suffix:
@@ -24,7 +24,7 @@ While this document details the implementation of Task Master's **CLI commands**
 programInstance
   .command('command-name')
   .description('Clear, concise description of what the command does')
-  .option('-o, --option <value>', 'Option description', 'default value')
+  .option('-s, --short-option <value>', 'Option description', 'default value')
   .option('--long-option <value>', 'Option description')
   .action(async (options) => {
     // Command implementation
@@ -34,8 +34,7 @@ While this document details the implementation of Task Master's **CLI commands**
 - **Command Handler Organization**:
   - ✅ DO: Keep action handlers concise and focused
   - ✅ DO: Extract core functionality to appropriate modules
-  - ✅ DO: Have the action handler import and call the relevant function(s) from core modules (e.g., `task-manager.js`, `init.js`), passing the parsed `options`.
-  - ✅ DO: Perform basic parameter validation (e.g., checking for required options) within the action handler or at the start of the called core function.
+  - ✅ DO: Include validation for required parameters
   - ❌ DON'T: Implement business logic in command handlers

 ## Best Practices for Removal/Delete Commands
@@ -153,8 +152,8 @@ When implementing commands that delete or remove data (like `remove-task` or `re
 ```javascript
 // ✅ DO: Suggest alternatives for destructive operations
 console.log(chalk.yellow('Note: If you just want to exclude this task from active work, consider:'));
-console.log(chalk.cyan(` task-master set-status --id='${taskId}' --status='cancelled'`));
+console.log(chalk.cyan(` task-master set-status --id=${taskId} --status=cancelled`));
-console.log(chalk.cyan(` task-master set-status --id='${taskId}' --status='deferred'`));
+console.log(chalk.cyan(` task-master set-status --id=${taskId} --status=deferred`));
 console.log('This preserves the task and its history for reference.');
 ```

@@ -254,7 +253,7 @@ When implementing commands that delete or remove data (like `remove-task` or `re
 const taskId = parseInt(options.id, 10);
 if (isNaN(taskId) || taskId <= 0) {
   console.error(chalk.red(`Error: Invalid task ID: ${options.id}. Task ID must be a positive integer.`));
-  console.log(chalk.yellow('Usage example: task-master update-task --id=\'23\' --prompt=\'Update with new information.\nEnsure proper error handling.\''));
+  console.log(chalk.yellow('Usage example: task-master update-task --id=23 --prompt="Update with new information"'));
   process.exit(1);
 }

@@ -300,8 +299,8 @@ When implementing commands that delete or remove data (like `remove-task` or `re
 (dependencies.length > 0 ? chalk.white(`Dependencies: ${dependencies.join(', ')}`) + '\n' : '') +
 '\n' +
 chalk.white.bold('Next Steps:') + '\n' +
-chalk.cyan(`1. Run ${chalk.yellow(`task-master show '${parentId}'`)} to see the parent task with all subtasks`) + '\n' +
+chalk.cyan(`1. Run ${chalk.yellow(`task-master show ${parentId}`)} to see the parent task with all subtasks`) + '\n' +
-chalk.cyan(`2. Run ${chalk.yellow(`task-master set-status --id='${parentId}.${subtask.id}' --status='in-progress'`)} to start working on it`),
+chalk.cyan(`2. Run ${chalk.yellow(`task-master set-status --id=${parentId}.${subtask.id} --status=in-progress`)} to start working on it`),
 { padding: 1, borderColor: 'green', borderStyle: 'round', margin: { top: 1 } }
 ));
 ```
@@ -376,7 +375,7 @@ When implementing commands that delete or remove data (like `remove-task` or `re
 ' --option1 <value> Description of option1 (required)\n' +
 ' --option2 <value> Description of option2\n\n' +
 chalk.cyan('Examples:') + '\n' +
-' task-master command --option1=\'value1\' --option2=\'value2\'',
+' task-master command --option1=value --option2=value',
 { padding: 1, borderColor: 'blue', borderStyle: 'round' }
 ));
 }
@@ -419,7 +418,7 @@ When implementing commands that delete or remove data (like `remove-task` or `re
 // Provide more helpful error messages for common issues
 if (error.message.includes('task') && error.message.includes('not found')) {
   console.log(chalk.yellow('\nTo fix this issue:'));
-  console.log(' 1. Run \'task-master list\' to see all available task IDs');
+  console.log(' 1. Run task-master list to see all available task IDs');
   console.log(' 2. Use a valid task ID with the --id parameter');
 } else if (error.message.includes('API key')) {
   console.log(chalk.yellow('\nThis error is related to API keys. Check your environment variables.'));
@@ -562,46 +561,4 @@ When implementing commands that delete or remove data (like `remove-task` or `re
 }
 ```

 Refer to [`commands.js`](mdc:scripts/modules/commands.js) for implementation examples and [`new_features.mdc`](mdc:.cursor/rules/new_features.mdc) for integration guidelines.
-// Helper function to show add-subtask command help
-function showAddSubtaskHelp() {
-  console.log(boxen(
-    chalk.white.bold('Add Subtask Command Help') + '\n\n' +
-    chalk.cyan('Usage:') + '\n' +
-    ` task-master add-subtask --parent=<id> [options]\n\n` +
-    chalk.cyan('Options:') + '\n' +
-    ' -p, --parent <id> Parent task ID (required)\n' +
-    ' -i, --task-id <id> Existing task ID to convert to subtask\n' +
-    ' -t, --title <title> Title for the new subtask\n' +
-    ' -d, --description <text> Description for the new subtask\n' +
-    ' --details <text> Implementation details for the new subtask\n' +
-    ' --dependencies <ids> Comma-separated list of dependency IDs\n' +
-    ' -s, --status <status> Status for the new subtask (default: "pending")\n' +
-    ' -f, --file <file> Path to the tasks file (default: "tasks/tasks.json")\n' +
-    ' --skip-generate Skip regenerating task files\n\n' +
-    chalk.cyan('Examples:') + '\n' +
-    ' task-master add-subtask --parent=\'5\' --task-id=\'8\'\n' +
-    ' task-master add-subtask -p \'5\' -t \'Implement login UI\' -d \'Create the login form\'\n' +
-    ' task-master add-subtask -p \'5\' -t \'Handle API Errors\' --details $\'Handle 401 Unauthorized.\nHandle 500 Server Error.\'',
-    { padding: 1, borderColor: 'blue', borderStyle: 'round' }
-  ));
-}
-
-// Helper function to show remove-subtask command help
-function showRemoveSubtaskHelp() {
-  console.log(boxen(
-    chalk.white.bold('Remove Subtask Command Help') + '\n\n' +
-    chalk.cyan('Usage:') + '\n' +
-    ` task-master remove-subtask --id=<parentId.subtaskId> [options]\n\n` +
-    chalk.cyan('Options:') + '\n' +
-    ' -i, --id <id> Subtask ID(s) to remove in format "parentId.subtaskId" (can be comma-separated, required)\n' +
-    ' -c, --convert Convert the subtask to a standalone task instead of deleting it\n' +
-    ' -f, --file <file> Path to the tasks file (default: "tasks/tasks.json")\n' +
-    ' --skip-generate Skip regenerating task files\n\n' +
-    chalk.cyan('Examples:') + '\n' +
-    ' task-master remove-subtask --id=\'5.2\'\n' +
-    ' task-master remove-subtask --id=\'5.2,6.3,7.1\'\n' +
-    ' task-master remove-subtask --id=\'5.2\' --convert',
-    { padding: 1, borderColor: 'blue', borderStyle: 'round' }
-  ));
-}
@@ -29,7 +29,7 @@ Task Master offers two primary ways to interact:

 ## Standard Development Workflow Process

-- Start new projects by running `init` tool / `task-master init` or `parse_prd` / `task-master parse-prd --input='<prd-file.txt>'` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to generate initial tasks.json
+- Start new projects by running `init` tool / `task-master init` or `parse_prd` / `task-master parse-prd --input=<prd-file.txt>` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to generate initial tasks.json
 - Begin coding sessions with `get_tasks` / `task-master list` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to see current tasks, status, and IDs
 - Determine the next task to work on using `next_task` / `task-master next` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)).
 - Analyze task complexity with `analyze_complexity` / `task-master analyze-complexity --research` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) before breaking down tasks
@@ -45,7 +45,7 @@ Task Master offers two primary ways to interact:
 - Update dependent tasks when implementation differs from original plan using `update` / `task-master update --from=<id> --prompt="..."` or `update_task` / `task-master update-task --id=<id> --prompt="..."` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc))
 - Add new tasks discovered during implementation using `add_task` / `task-master add-task --prompt="..."` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)).
 - Add new subtasks as needed using `add_subtask` / `task-master add-subtask --parent=<id> --title="..."` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)).
-- Append notes or details to subtasks using `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='Add implementation notes here...\nMore details...'` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)).
+- Append notes or details to subtasks using `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt="..."` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)).
 - Generate task files with `generate` / `task-master generate` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) after updating tasks.json
 - Maintain valid dependency structure with `add_dependency`/`remove_dependency` tools or `task-master add-dependency`/`remove-dependency` commands, `validate_dependencies` / `task-master validate-dependencies`, and `fix_dependencies` / `task-master fix-dependencies` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) when needed
 - Respect dependency chains and task priorities when selecting work
@@ -74,8 +74,8 @@ Task Master offers two primary ways to interact:
 - When implementation differs significantly from planned approach
 - When future tasks need modification due to current implementation choices
 - When new dependencies or requirements emerge
-- Use `update` / `task-master update --from=<futureTaskId> --prompt='<explanation>\nUpdate context...'` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to update multiple future tasks.
+- Use `update` / `task-master update --from=<futureTaskId> --prompt="<explanation>"` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to update multiple future tasks.
-- Use `update_task` / `task-master update-task --id=<taskId> --prompt='<explanation>\nUpdate context...'` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to update a single specific task.
+- Use `update_task` / `task-master update-task --id=<taskId> --prompt="<explanation>"` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to update a single specific task.

 ## Task Status Management

@@ -150,59 +150,6 @@ Task Master offers two primary ways to interact:
 - Task files are automatically regenerated after dependency changes
 - Dependencies are visualized with status indicators in task listings and files

-## Iterative Subtask Implementation
-
-Once a task has been broken down into subtasks using `expand_task` or similar methods, follow this iterative process for implementation:
-
-1. **Understand the Goal (Preparation):**
-   * Use `get_task` / `task-master show <subtaskId>` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)) to thoroughly understand the specific goals and requirements of the subtask.
-
-2. **Initial Exploration & Planning (Iteration 1):**
-   * This is the first attempt at creating a concrete implementation plan.
-   * Explore the codebase to identify the precise files, functions, and even specific lines of code that will need modification.
-   * Determine the intended code changes (diffs) and their locations.
-   * Gather *all* relevant details from this exploration phase.
-
-3. **Log the Plan:**
-   * Run `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='<detailed plan>'` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)).
-   * Provide the *complete and detailed* findings from the exploration phase in the prompt. Include file paths, line numbers, proposed diffs, reasoning, and any potential challenges identified. Do not omit details. The goal is to create a rich, timestamped log within the subtask's `details`.
-
-4. **Verify the Plan:**
-   * Run `get_task` / `task-master show <subtaskId>` again to confirm that the detailed implementation plan has been successfully appended to the subtask's details.
-
-5. **Begin Implementation:**
-   * Set the subtask status using `set_task_status` / `task-master set-status --id=<subtaskId> --status=in-progress` (see [`taskmaster.mdc`](mdc:.cursor/rules/taskmaster.mdc)).
-   * Start coding based on the logged plan.
-
-6. **Refine and Log Progress (Iteration 2+):**
-   * As implementation progresses, you will encounter challenges, discover nuances, or confirm successful approaches.
-   * **Before appending new information**: Briefly review the *existing* details logged in the subtask (using `get_task` or recalling from context) to ensure the update adds fresh insights and avoids redundancy.
-   * **Regularly** use `update_subtask` / `task-master update-subtask --id=<subtaskId> --prompt='<update details>\n- What worked...\n- What didn't work...'` to append new findings.
-   * **Crucially, log:**
-     * What worked ("fundamental truths" discovered).
-     * What didn't work and why (to avoid repeating mistakes).
-     * Specific code snippets or configurations that were successful.
-     * Decisions made, especially if confirmed with user input.
-     * Any deviations from the initial plan and the reasoning.
-   * The objective is to continuously enrich the subtask's details, creating a log of the implementation journey that helps the AI (and human developers) learn, adapt, and avoid repeating errors.
-
-7. **Review & Update Rules (Post-Implementation):**
-   * Once the implementation for the subtask is functionally complete, review all code changes and the relevant chat history.
-   * Identify any new or modified code patterns, conventions, or best practices established during the implementation.
-   * Create new or update existing Cursor rules in the `.cursor/rules/` directory to capture these patterns, following the guidelines in [`cursor_rules.mdc`](mdc:.cursor/rules/cursor_rules.mdc) and [`self_improve.mdc`](mdc:.cursor/rules/self_improve.mdc).
-
-8. **Mark Task Complete:**
-   * After verifying the implementation and updating any necessary rules, mark the subtask as completed: `set_task_status` / `task-master set-status --id=<subtaskId> --status=done`.
-
-9. **Commit Changes (If using Git):**
-   * Stage the relevant code changes and any updated/new rule files (`git add .`).
-   * Craft a comprehensive Git commit message summarizing the work done for the subtask, including both code implementation and any rule adjustments.
-   * Execute the commit command directly in the terminal (e.g., `git commit -m 'feat(module): Implement feature X for subtask <subtaskId>\n\n- Details about changes...\n- Updated rule Y for pattern Z'`).
-   * Consider if a Changeset is needed according to [`changeset.mdc`](mdc:.cursor/rules/changeset.mdc). If so, run `npm run changeset`, stage the generated file, and amend the commit or create a new one.
-
-10. **Proceed to Next Subtask:**
-    * Identify the next subtask in the dependency chain (e.g., using `next_task` / `task-master next`) and repeat this iterative process starting from step 1.
-
 ## Code Analysis & Refactoring Techniques

 - **Top-Level Function Search**:
@@ -67,127 +67,65 @@ When implementing a new direct function in `mcp-server/src/core/direct-functions
|
|||||||
```
|
```
|
||||||
|
|
||||||
4. **Comprehensive Error Handling**:
|
4. **Comprehensive Error Handling**:
|
||||||
- ✅ **DO**: Wrap core function calls *and AI calls* in try/catch blocks
|
- ✅ **DO**: Wrap core function calls in try/catch blocks
|
||||||
- ✅ **DO**: Log errors with appropriate severity and context
|
- ✅ **DO**: Log errors with appropriate severity and context
|
||||||
- ✅ **DO**: Return standardized error objects with code and message (`{ success: false, error: { code: '...', message: '...' } }`)
|
- ✅ **DO**: Return standardized error objects with code and message
|
||||||
- ✅ **DO**: Handle file system errors, AI client errors, AI processing errors, and core function errors distinctly with appropriate codes.
|
- ✅ **DO**: Handle file system errors separately from function-specific errors
|
||||||
- **Example**:
|
- **Example**:
|
||||||
```javascript
|
```javascript
|
||||||
try {
|
try {
|
||||||
// Core function call or AI logic
|
// Core function call
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
log.error(`Failed to execute direct function logic: ${error.message}`);
|
log.error(`Failed to execute command: ${error.message}`);
|
||||||
return {
|
return {
|
||||||
success: false,
|
success: false,
|
||||||
error: {
|
error: {
|
||||||
code: error.code || 'DIRECT_FUNCTION_ERROR', // Use specific codes like AI_CLIENT_ERROR, etc.
|
code: error.code || 'DIRECT_FUNCTION_ERROR',
|
||||||
message: error.message,
|
message: error.message,
|
||||||
details: error.stack // Optional: Include stack in debug mode
|
details: error.stack
|
||||||
},
|
},
|
||||||
fromCache: false // Ensure this is included if applicable
|
fromCache: false
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
5. **Handling Logging Context (`mcpLog`)**:
|
5. **Silent Mode Implementation**:
|
||||||
- **Requirement**: Core functions that use the internal `report` helper function (common in `task-manager.js`, `dependency-manager.js`, etc.) expect the `options` object to potentially contain an `mcpLog` property. This `mcpLog` object **must** have callable methods for each log level (e.g., `mcpLog.info(...)`, `mcpLog.error(...)`).
|
- ✅ **DO**: Import silent mode utilities at the top of your file
|
||||||
- **Challenge**: The `log` object provided by FastMCP to the direct function's context, while functional, might not perfectly match this expected structure or could change in the future. Passing it directly can lead to runtime errors like `mcpLog[level] is not a function`.
|
|
||||||
- **Solution: The Logger Wrapper Pattern**: To reliably bridge the FastMCP `log` object and the core function's `mcpLog` expectation, use a simple wrapper object within the direct function:
|
|
||||||
```javascript
|
```javascript
|
||||||
// Standard logWrapper pattern within a Direct Function
|
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||||
const logWrapper = {
|
|
||||||
info: (message, ...args) => log.info(message, ...args),
|
|
||||||
warn: (message, ...args) => log.warn(message, ...args),
|
|
||||||
error: (message, ...args) => log.error(message, ...args),
|
|
||||||
debug: (message, ...args) => log.debug && log.debug(message, ...args), // Handle optional debug
|
|
||||||
success: (message, ...args) => log.info(message, ...args) // Map success to info if needed
|
|
||||||
};
|
|
||||||
|
|
||||||
// ... later when calling the core function ...
|
|
||||||
await coreFunction(
|
|
||||||
// ... other arguments ...
|
|
||||||
tasksPath,
|
|
||||||
taskId,
|
|
||||||
{
|
|
||||||
mcpLog: logWrapper, // Pass the wrapper object
|
|
||||||
session
|
|
||||||
},
|
|
||||||
'json' // Pass 'json' output format if supported by core function
|
|
||||||
);
|
|
||||||
```
|
```
|
||||||
- **Critical For JSON Output Format**: Passing the `logWrapper` as `mcpLog` serves a dual purpose:
|
- ✅ **DO**: Wrap core function calls with silent mode control
|
||||||
1. **Prevents Runtime Errors**: It ensures the `mcpLog[level](...)` calls within the core function succeed
|
```javascript
|
||||||
2. **Controls Output Format**: In functions like `updateTaskById` and `updateSubtaskById`, the presence of `mcpLog` in the options triggers setting `outputFormat = 'json'` (instead of 'text'). This prevents UI elements (spinners, boxes) from being generated, which would break the JSON response.
|
// Enable silent mode before the core function call
|
||||||
- **Proven Solution**: This pattern has successfully fixed multiple issues in our MCP tools (including `update-task` and `update-subtask`), where direct passing of the `log` object or omitting `mcpLog` led to either runtime errors or JSON parsing failures from UI output.
|
enableSilentMode();
|
||||||
- **When To Use**: Implement this wrapper in any direct function that calls a core function with an `options` object that might use `mcpLog` for logging or output format control.
|
|
||||||
- **Why it Works**: The `logWrapper` explicitly defines the `.info()`, `.warn()`, `.error()`, etc., methods that the core function's `report` helper needs, ensuring the `mcpLog[level](...)` call succeeds. It simply forwards the logging calls to the actual FastMCP `log` object.
|
// Execute core function
|
||||||
- **Combined with Silent Mode**: Remember that using the `logWrapper` for `mcpLog` is **necessary *in addition* to using `enableSilentMode()` / `disableSilentMode()`** (see next point). The wrapper handles structured logging *within* the core function, while silent mode suppresses direct `console.log` and UI elements (spinners, boxes) that would break the MCP JSON response.

6. **Silent Mode Implementation**:
   - ✅ **DO**: Import silent mode utilities at the top: `import { enableSilentMode, disableSilentMode, isSilentMode } from '../../../../scripts/modules/utils.js';`
   - ✅ **DO**: Ensure core Task Master functions called from direct functions do **not** pollute `stdout` with console output (banners, spinners, logs) that would break MCP's JSON communication.
     - **Preferred**: Modify the core function to accept an `outputFormat: 'json'` parameter and check it internally before printing UI elements. Pass `'json'` from the direct function.
     - **Required Fallback/Guarantee**: If the core function cannot be modified or its output suppression is unreliable, **wrap the core function call** within the direct function using `enableSilentMode()` / `disableSilentMode()` in a `try/finally` block. This guarantees no console output interferes with the MCP response.
   - ✅ **DO**: Use the `isSilentMode()` function to check global silent mode status if needed (rare in direct functions), NEVER access the global `silentMode` variable directly.
   - ❌ **DON'T**: Wrap AI client initialization or AI API calls in `enable/disableSilentMode`; their logging is controlled via the `log` object (passed potentially within the `logWrapper` for core functions).
   - ❌ **DON'T**: Assume a core function is silent just because it *should* be. Verify or use the `enable/disableSilentMode` wrapper.
   - **Example (Direct Function Guaranteeing Silence and using Log Wrapper)**:
   ```javascript
   export async function coreWrapperDirect(args, log, context = {}) {
     const { session } = context;
     const tasksPath = findTasksJsonPath(args, log);

     // Create the logger wrapper
     const logWrapper = { /* ... as defined above ... */ };

     enableSilentMode(); // Ensure silence for direct console output
     try {
       // Call core function, passing wrapper and 'json' format
       const result = await coreFunction(
         tasksPath,
         args.param1,
         { mcpLog: logWrapper, session },
         'json' // Explicitly request JSON format if supported
       );
       return { success: true, data: result };
     } catch (error) {
       log.error(`Error: ${error.message}`);
       // Return standardized error object
       return { success: false, error: { /* ... */ } };
     } finally {
       disableSilentMode(); // Critical: Always disable in finally
     }
   }
   ```

7. **Debugging MCP/Core Logic Interaction**:
   - ✅ **DO**: If an MCP tool fails with unclear errors (like JSON parsing failures), run the equivalent `task-master` CLI command in the terminal. The CLI often provides more detailed error messages originating from the core logic (e.g., `ReferenceError`, stack traces) that are obscured by the MCP layer.

### Specific Guidelines for AI-Based Direct Functions

Direct functions that interact with AI (e.g., `addTaskDirect`, `expandTaskDirect`) have additional responsibilities:

- **Context Parameter**: These functions receive an additional `context` object as their third parameter. **Critically, this object should only contain `{ session }`**. Do NOT expect or use `reportProgress` from this context.
  ```javascript
  export async function yourAIDirect(args, log, context = {}) {
    const { session } = context; // Only expect session
    // ...
  }
  ```
- **AI Client Initialization**:
  - ✅ **DO**: Use the utilities from [`mcp-server/src/core/utils/ai-client-utils.js`](mdc:mcp-server/src/core/utils/ai-client-utils.js) (e.g., `getAnthropicClientForMCP(session, log)`) to get AI client instances. These correctly use the `session` object to resolve API keys.
  - ✅ **DO**: Wrap client initialization in a try/catch block and return a specific `AI_CLIENT_ERROR` on failure.
- **AI Interaction**:
  - ✅ **DO**: Build prompts using helper functions where appropriate (e.g., from `ai-prompt-helpers.js`).
  - ✅ **DO**: Make the AI API call using appropriate helpers (e.g., `_handleAnthropicStream`). Pass the `log` object to these helpers for internal logging. **Do NOT pass `reportProgress`**.
  - ✅ **DO**: Parse the AI response using helpers (e.g., `parseTaskJsonResponse`) and handle parsing errors with a specific code (e.g., `RESPONSE_PARSING_ERROR`).
- **Calling Core Logic**:
  - ✅ **DO**: After successful AI interaction, call the relevant core Task Master function (from `scripts/modules/`) if needed (e.g., `addTaskDirect` calls `addTask`).
  - ✅ **DO**: Pass necessary data, including potentially the parsed AI results, to the core function.
  - ✅ **DO**: If the core function can produce console output, call it with an `outputFormat: 'json'` argument (or similar, depending on the function) to suppress CLI output. Ensure the core function is updated to respect this. Use `enableSilentMode/disableSilentMode` around the core function call as a fallback if `outputFormat` is not supported or insufficient.
- **Progress Indication**:
  - ❌ **DON'T**: Call `reportProgress` within the direct function.
  - ✅ **DO**: If intermediate progress status is needed *within* the long-running direct function, use standard logging: `log.info('Progress: Processing AI response...')`.
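
Taken together, a minimal sketch of an AI-based direct function might look like the following. This is illustrative only: `getAnthropicClientForMCP`, `_handleAnthropicStream`, and `parseTaskJsonResponse` are the helpers named above, but their exact signatures and import locations are assumptions, and the inline prompt stands in for a real prompt helper.

```javascript
// Sketch only: helper signatures and import paths are assumptions.
import { getAnthropicClientForMCP } from '../utils/ai-client-utils.js';
import { _handleAnthropicStream, parseTaskJsonResponse } from '../../../../scripts/modules/ai-services.js'; // assumed location

export async function yourAIDirect(args, log, context = {}) {
  const { session } = context; // Only session, never reportProgress

  let client;
  try {
    client = getAnthropicClientForMCP(session, log); // Resolves API keys from session.env
  } catch (error) {
    return { success: false, error: { code: 'AI_CLIENT_ERROR', message: error.message }, fromCache: false };
  }

  try {
    const prompt = `Generate content for: ${args.prompt}`; // Or a helper from ai-prompt-helpers.js
    log.info('Progress: AI call initiated...');
    const responseText = await _handleAnthropicStream(client, prompt, log); // Assumed call shape
    const data = parseTaskJsonResponse(responseText);
    return { success: true, data, fromCache: false };
  } catch (error) {
    log.error(`AI interaction failed: ${error.message}`);
    return { success: false, error: { code: 'RESPONSE_PARSING_ERROR', message: error.message }, fromCache: false };
  }
}
```
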
## Tool Definition and Execution

The `execute` function receives validated arguments and the FastMCP context:

```javascript
// Standard signature
execute: async (args, context) => {
  // Tool implementation
}

// Destructured signature (recommended)
execute: async (args, { log, reportProgress, session }) => {
  // Tool implementation
}
```

- **args**: The first parameter contains all the validated parameters defined in the tool's schema.
- **context**: The second parameter is an object containing `{ log, reportProgress, session }` provided by FastMCP.
  - ✅ **DO**: Use `{ log, session }` when calling direct functions.
  - ⚠️ **WARNING**: Avoid passing `reportProgress` down to direct functions due to client compatibility issues. See Progress Reporting Convention below.
### Standard Tool Execution Pattern

The `execute` method within each MCP tool (in `mcp-server/src/tools/*.js`) should follow this pattern:

1. **Log Entry**: Log the start of the tool execution with relevant arguments.
2. **Get Project Root**: Use the `getProjectRootFromSession(session, log)` utility (from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js)) to extract the project root path from the client session. Fall back to `args.projectRoot` if the session doesn't provide a root.
3. **Call Direct Function**: Invoke the corresponding `*Direct` function wrapper (e.g., `listTasksDirect` from [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js)), passing an updated `args` object that includes the resolved `projectRoot`, along with the `log` object. For AI-based direct functions, also pass a context containing **only `{ session }`**. **Do NOT pass `reportProgress`**.
   ```javascript
   // Example call to a non-AI direct function
   const result = await someDirectFunction({ ...args, projectRoot }, log);

   // Example call to an AI-based direct function
   const resultAI = await someAIDirect({ ...args, projectRoot }, log, { session });
   ```
4. **Handle Result**: Receive the result object (`{ success, data/error, fromCache }`) from the `*Direct` function.
5. **Format Response**: Pass this result object to the `handleApiResult` utility (from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js)) for standardized MCP response formatting and error handling.
6. **Return**: Return the formatted response object provided by `handleApiResult`.

```javascript
// Example execute method structure for a tool calling an AI-based direct function
import { getProjectRootFromSession, handleApiResult, createErrorResponse } from './utils.js';
import { someAIDirectFunction } from '../core/task-master-core.js';

// ... inside server.addTool({...})
execute: async (args, { log, session }) => { // Note: reportProgress is omitted here
  try {
    log.info(`Starting AI tool execution with args: ${JSON.stringify(args)}`);

    // 1. Get Project Root
    let rootFolder = getProjectRootFromSession(session, log);
    if (!rootFolder && args.projectRoot) {
      rootFolder = args.projectRoot;
      log.info(`Using project root from args as fallback: ${rootFolder}`);
    }

    // 2. Call AI-Based Direct Function (passing only log and session in context)
    const result = await someAIDirectFunction({
      ...args,
      projectRoot: rootFolder // Ensure projectRoot is explicitly passed
    }, log, { session }); // Pass session here, NO reportProgress

    // 3. Handle and Format Response
    return handleApiResult(result, log);

  } catch (error) {
    log.error(`Error during AI tool execution: ${error.message}`);
    return createErrorResponse(error.message);
  }
}
```
### Using AsyncOperationManager for Background Tasks

For tools that execute potentially long-running operations *where the AI call is just one part* (e.g., `expand-task`, `update`), use the AsyncOperationManager. The `add-task` command, as refactored, does *not* require this in the MCP tool layer because the direct function handles the primary AI work and returns the final result synchronously from the perspective of the MCP tool.

For tools that *do* use `AsyncOperationManager`:

```javascript
import { AsyncOperationManager } from '../utils/async-operation-manager.js'; // Correct path assuming utils location
import { getProjectRootFromSession, createContentResponse, createErrorResponse } from './utils.js';
import { someIntensiveDirect } from '../core/task-master-core.js';

// ... inside server.addTool({...})
execute: async (args, { log, session }) => { // Note: reportProgress omitted
  try {
    log.info(`Starting background operation with args: ${JSON.stringify(args)}`);

    // 1. Get Project Root
    let rootFolder = getProjectRootFromSession(session, log);
    if (!rootFolder && args.projectRoot) {
      rootFolder = args.projectRoot;
      log.info(`Using project root from args as fallback: ${rootFolder}`);
    }

    // Create operation description
    const operationDescription = `Expanding task ${args.id}...`; // Example

    // 2. Start async operation using AsyncOperationManager
    const operation = AsyncOperationManager.createOperation(
      operationDescription,
      async (reportProgressCallback) => { // This callback is provided by AsyncOperationManager
        // This runs in the background
        try {
          // Report initial progress *from the manager's callback*
          reportProgressCallback({ progress: 0, status: 'Starting operation...' });

          // Call the direct function (passing only session context)
          const result = await someIntensiveDirect(
            { ...args, projectRoot: rootFolder },
            log,
            { session } // Pass session, NO reportProgress
          );

          // Report final progress *from the manager's callback*
          reportProgressCallback({
            progress: 100,
            status: result.success ? 'Operation completed' : 'Operation failed',
            result: result.data, // Include final data if successful
            error: result.error // Include error object if failed
          });

          return result; // Return the direct function's result
        } catch (error) {
          // Handle errors within the async task
          reportProgressCallback({
            progress: 100,
            status: 'Operation failed critically',
            error: { message: error.message, code: error.code || 'ASYNC_OPERATION_FAILED' }
          });
          throw error; // Re-throw for the manager to catch
        }
      }
    );

    // 3. Return immediate response with operation ID
    return {
      status: 202, // StatusCodes.ACCEPTED
      body: {
        success: true,
        message: 'Operation started',
        operationId: operation.id
      }
    };
  } catch (error) {
    log.error(`Error starting background operation: ${error.message}`);
    return createErrorResponse(`Failed to start operation: ${error.message}`); // Use standard error response
  }
}
```

Clients should then use the `get_operation_status` tool to check on operation progress:

```javascript
// In get-operation-status.js
import { asyncOperationManager } from '../core/utils/async-manager.js';
import { createContentResponse, createErrorResponse } from './utils.js';

// ... inside server.addTool({...})
execute: async (args, { log }) => {
  try {
    const { operationId } = args;
    log.info(`Checking status of operation: ${operationId}`);

    const status = asyncOperationManager.getStatus(operationId);

    if (status.status === 'not_found') {
      return createErrorResponse(status.error.message);
    }

    return createContentResponse({
      ...status,
      message: `Operation status: ${status.status}`
    });
  } catch (error) {
    log.error(`Error checking operation status: ${error.message}`);
    return createErrorResponse(error.message);
  }
}
```
### Logging Convention

The `log` object (destructured from `context`) provides standardized logging methods. Use it within both the `execute` method and the `*Direct` functions. **If progress indication is needed within a direct function, use `log.info()` instead of `reportProgress`**.

```javascript
// Proper logging usage
log.info(`Starting ${toolName} with parameters: ${JSON.stringify(sanitizedArgs)}`);
log.debug("Detailed operation info", { data });
log.warn("Potential issue detected");
log.error(`Error occurred: ${error.message}`, { stack: error.stack });
log.info('Progress: 50% - AI call initiated...'); // Example progress logging
```
### Progress Reporting Convention

- ⚠️ **DEPRECATED within Direct Functions**: The `reportProgress` function passed in the `context` object should **NOT** be called from within `*Direct` functions. Doing so can cause client-side validation errors due to missing/incorrect `progressToken` handling.
- ✅ **DO**: For tools using `AsyncOperationManager`, use the `reportProgressCallback` function *provided by the manager* within the background task definition (as shown in the `AsyncOperationManager` example above) to report progress updates for the *overall operation*.
- ✅ **DO**: If finer-grained progress needs to be indicated *during* the execution of a `*Direct` function (whether called directly or via `AsyncOperationManager`), use `log.info()` statements (e.g., `log.info('Progress: Parsing AI response...')`).
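
A compact illustration of the convention (a sketch; both calls mirror lines already shown in the examples above):

```javascript
// Inside a *Direct function: never call reportProgress; log progress instead.
log.info('Progress: Parsing AI response...');

// Inside the MCP tool layer, when AsyncOperationManager is used, report overall
// progress only through the callback the manager provides to the background task.
reportProgressCallback({ progress: 50, status: 'Processing AI response...' });
```
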
### Session Usage Convention

The `session` object (destructured from `context`) contains authenticated session information:

- **Authentication**: Access user-specific data (`session.userId`, etc.) if authentication is implemented.
- **Project Root**: The primary use in Task Master is accessing `session.roots` to determine the client's project root directory via the `getProjectRootFromSession` utility (from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js)). See the Standard Tool Execution Pattern above.
- **Environment Variables**: The `session.env` object is critical for AI tools. Pass the `session` object to the `*Direct` function's context, and then to AI client utility functions (like `getAnthropicClientForMCP`) which will extract API keys and other relevant environment settings (e.g., `MODEL`, `MAX_TOKENS`) from `session.env`.
- **Capabilities**: Can be used to check client capabilities (`session.clientCapabilities`).
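
For orientation, the environment lookup described above amounts to something like this inside an AI-based direct function (a sketch; `MODEL` and `MAX_TOKENS` are the keys named above, while other key names depend on the provider and on `ai-client-utils.js`):

```javascript
// Sketch: session.env carries the caller's environment for AI utilities.
const client = getAnthropicClientForMCP(session, log); // reads the API key from session.env
const model = session?.env?.MODEL;          // optional model override
const maxTokens = session?.env?.MAX_TOKENS; // optional token limit
```
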
## Direct Function Wrappers (`*Direct`)

These functions, located in `mcp-server/src/core/direct-functions/`, form the core logic execution layer for MCP tools.

- **Purpose**: Bridge MCP tools and core Task Master modules (`scripts/modules/*`). Handle AI interactions if applicable.
- **Responsibilities**:
  - Receive `args` (including the `projectRoot` determined by the tool), `log` object, and optionally a `context` object (containing **only `{ session }`** if needed).
  - **Find `tasks.json`**: Use `findTasksJsonPath(args, log)` from [`core/utils/path-utils.js`](mdc:mcp-server/src/core/utils/path-utils.js).
  - Validate arguments specific to the core logic.
  - **Handle AI Logic (if applicable)**: Initialize AI clients (using `session` from context), build prompts, make AI calls, parse responses.
  - **Implement Caching (if applicable)**: Use `getCachedOrExecute` from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js) for read operations.
  - **Call Core Logic**: Call the underlying function from the core Task Master modules, passing necessary data (including AI results if applicable).
    - ✅ **DO**: Pass `outputFormat: 'json'` (or similar) to the core function if it might produce console output.
    - ✅ **DO**: Wrap the core function call with `enableSilentMode/disableSilentMode` if necessary.
  - Handle errors gracefully (AI errors, core logic errors, file errors).
  - Return a standardized result object: `{ success: boolean, data?: any, error?: { code: string, message: string }, fromCache?: boolean }`.
  - ❌ **DON'T**: Call `reportProgress`. Use `log.info` for progress indication if needed.
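
The caching responsibility is the one item above that is not illustrated elsewhere in this document. A rough sketch of how a read-only direct function might use `getCachedOrExecute` follows; the option names (`cacheKey`, `actionFn`) and the overall call shape are assumptions, not the utility's documented signature.

```javascript
// Hypothetical sketch of caching in a read-only direct function.
import { getCachedOrExecute } from '../../tools/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { coreReadFunction } from '../../../../scripts/modules/task-manager.js'; // placeholder core function

export async function getSomethingDirect(args, log) {
  const tasksPath = findTasksJsonPath(args, log);
  const cacheKey = `getSomething:${tasksPath}:${args.id ?? 'all'}`; // assumed key format

  return getCachedOrExecute({
    cacheKey,               // assumed option name
    log,
    actionFn: async () => { // assumed option name
      const data = await coreReadFunction(tasksPath, args.id);
      return { success: true, data };
    }
  });
}
```
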
## Key Principles

- **Prefer Direct Function Calls**: MCP tools should always call `*Direct` wrappers instead of `executeTaskMasterCommand`.
- **Standardized Execution Flow**: Follow the pattern: MCP Tool -> `getProjectRootFromSession` -> `*Direct` Function -> Core Logic / AI Logic.
- **Path Resolution via Direct Functions**: The `*Direct` function is responsible for finding the exact `tasks.json` path using `findTasksJsonPath`, relying on the `projectRoot` passed in `args`.
- **AI Logic in Direct Functions**: For AI-based tools, the `*Direct` function handles AI client initialization, calls, and parsing, using the `session` object passed in its context.
- **Silent Mode in Direct Functions**: Wrap *core function* calls (from `scripts/modules`) with `enableSilentMode()` and `disableSilentMode()` if they produce console output not handled by `outputFormat`. Do not wrap AI calls.
- **Selective Async Processing**: Use `AsyncOperationManager` in the *MCP Tool layer* for operations involving multiple steps or long waits beyond a single AI call (e.g., file processing + AI call + file writing). Simple AI calls handled entirely within the `*Direct` function (like `addTaskDirect`) may not need it at the tool layer.
- **No `reportProgress` in Direct Functions**: Do not pass or use `reportProgress` within `*Direct` functions. Use `log.info()` for internal progress or report progress from the `AsyncOperationManager` callback in the MCP tool layer.
- **Output Formatting**: Ensure core functions called by `*Direct` functions can suppress CLI output, ideally via an `outputFormat` parameter.
- **Project Initialization**: Use the `initialize_project` tool for setting up new projects in integrated environments.
- **Centralized Utilities**: Use helpers from `mcp-server/src/tools/utils.js`, `mcp-server/src/core/utils/path-utils.js`, and `mcp-server/src/core/utils/ai-client-utils.js`. See [`utilities.mdc`](mdc:.cursor/rules/utilities.mdc).
- **Caching in Direct Functions**: Caching logic resides *within* the `*Direct` functions using `getCachedOrExecute`.
## Resources and Resource Templates

Resources provide LLMs with static or dynamic data without executing tools.

Follow these steps to add MCP support for an existing Task Master command (see [`new_features.mdc`](mdc:.cursor/rules/new_features.mdc) for more detail):

1. **Ensure Core Logic Exists**: Verify the core functionality is implemented and exported from the relevant module in `scripts/modules/`. Ensure the core function can suppress console output (e.g., via an `outputFormat` parameter).

2. **Create Direct Function File in `mcp-server/src/core/direct-functions/`**:
   - Create a new file (e.g., `your-command.js`) using **kebab-case** naming.
   - Import necessary core functions, `findTasksJsonPath`, silent mode utilities, and potentially AI client/prompt utilities.
   - Implement `async function yourCommandDirect(args, log, context = {})` using **camelCase** with `Direct` suffix. **Remember `context` should only contain `{ session }` if needed (for AI keys/config).**
   - **Path Resolution**: Obtain `tasksPath` using `findTasksJsonPath(args, log)`.
   - Parse other `args` and perform necessary validation.
   - **Handle AI (if applicable)**: Initialize clients using `get*ClientForMCP(session, log)`, build prompts, call AI, parse response. Handle AI-specific errors.
   - **Implement Caching (if applicable)**: Use `getCachedOrExecute`.
   - **Call Core Logic**:
     - Wrap with `enableSilentMode/disableSilentMode` if necessary.
     - Pass `outputFormat: 'json'` (or similar) if applicable.
   - Handle errors from the core function.
   - Format the return as `{ success: true/false, data/error, fromCache?: boolean }`.
   - ❌ **DON'T**: Call `reportProgress`.
   - Export the wrapper function.

3. **Update `task-master-core.js` with Import/Export**: Import and re-export your `*Direct` function and add it to the `directFunctions` map.

4. **Create MCP Tool (`mcp-server/src/tools/`)**:
   - Create a new file (e.g., `your-command.js`) using **kebab-case**.
   - Import `zod`, `handleApiResult`, `createErrorResponse`, `getProjectRootFromSession`, and your `yourCommandDirect` function. Import `AsyncOperationManager` if needed.
   - Implement `registerYourCommandTool(server)`.
   - Define the tool `name` using **snake_case** (e.g., `your_command`).
   - Define the `parameters` using `zod`. Include `projectRoot: z.string().optional()`.
   - Implement the `async execute(args, { log, session })` method (omitting `reportProgress` from destructuring).
     - Get `rootFolder` using `getProjectRootFromSession(session, log)`.
     - **Determine Execution Strategy**:
       - **If using `AsyncOperationManager`**: Create the operation, call the `*Direct` function from within the async task callback (passing `log` and `{ session }`), report progress *from the callback*, and return the initial `ACCEPTED` response.
       - **If calling the `*Direct` function synchronously** (like `add-task`): Call `await yourCommandDirect({ ...args, projectRoot }, log, { session });`. Handle the result with `handleApiResult`.
     - ❌ **DON'T**: Pass `reportProgress` down to the direct function in either case.

5. **Register Tool**: Import and call `registerYourCommandTool` in `mcp-server/src/tools/index.js`.
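
Putting steps 4 and 5 together, a minimal tool file might look like the sketch below. The names `your_command` / `yourCommandDirect` are placeholders and the `description` text is illustrative; the utilities and control flow are the ones described above.

```javascript
// mcp-server/src/tools/your-command.js (sketch)
import { z } from 'zod';
import { handleApiResult, createErrorResponse, getProjectRootFromSession } from './utils.js';
import { yourCommandDirect } from '../core/task-master-core.js';

export function registerYourCommandTool(server) {
  server.addTool({
    name: 'your_command',
    description: 'Describe what your_command does for the client.',
    parameters: z.object({
      projectRoot: z.string().optional().describe('Project root directory (falls back to session roots)'),
      file: z.string().optional().describe('Path to the tasks.json file (optional)')
    }),
    execute: async (args, { log, session }) => {
      try {
        log.info(`Starting your_command with args: ${JSON.stringify(args)}`);

        let rootFolder = getProjectRootFromSession(session, log);
        if (!rootFolder && args.projectRoot) {
          rootFolder = args.projectRoot;
        }

        const result = await yourCommandDirect({ ...args, projectRoot: rootFolder }, log, { session });
        return handleApiResult(result, log);
      } catch (error) {
        log.error(`Error in your_command: ${error.message}`);
        return createErrorResponse(error.message);
      }
    }
  });
}
```
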
## Critical Checklist for New Features

- **Comprehensive Function Exports**:
  - ✅ **DO**: Export **all core functions, helper functions (like `generateSubtaskPrompt`), and utility methods** needed by your new function or command from their respective modules.
  - ✅ **DO**: **Explicitly review the module's `export { ... }` block** at the bottom of the file to ensure every required dependency (even seemingly minor helpers like `findTaskById`, `taskExists`, specific prompt generators, AI call handlers, etc.) is included.
  - ❌ **DON'T**: Assume internal functions are already exported - **always verify**. A missing export will cause runtime errors (e.g., `ReferenceError: generateSubtaskPrompt is not defined`).
  - **Example**: If implementing a feature that checks task existence, ensure the helper function is in exports:
  ```javascript
  // At the bottom of your module file:
  export {
    // ... existing exports ...
    yourNewFunction,
    taskExists, // Helper function used by yourNewFunction
    findTaskById, // Helper function used by yourNewFunction
    generateSubtaskPrompt, // Helper needed by expand/add features
    getSubtasksFromAI, // Helper needed by expand/add features
  };
  ```

- **Parameter Completeness and Matching**:
  - ✅ **DO**: Pass all required parameters to functions you call within your implementation
  - ✅ **DO**: Check function signatures before implementing calls to them
  - ✅ **DO**: Verify that direct function parameters match their core function counterparts
  - ✅ **DO**: When implementing a direct function for MCP, ensure it only accepts parameters that exist in the core function
  - ✅ **DO**: Verify the expected *internal structure* of complex object parameters (like the `mcpLog` object, see mcp.mdc for the required logger wrapper pattern)
  - ❌ **DON'T**: Add parameters to direct functions that don't exist in core functions
  - ❌ **DON'T**: Assume default parameter values will handle missing arguments
  - ❌ **DON'T**: Assume object parameters will work without verifying their required internal structure or methods.
  - **Example**: When calling file generation, pass all required parameters:
  ```javascript
  // ✅ DO: Pass all required parameters
  await generateTaskFiles(tasksPath, path.dirname(tasksPath));

  // ❌ DON'T: Omit required parameters
  await generateTaskFiles(tasksPath); // Error - missing outputDir parameter
  ```
  - **Example**: Properly match direct function parameters to core function:
  ```javascript
  // Core function signature
  async function expandTask(tasksPath, taskId, numSubtasks, useResearch = false, additionalContext = '', options = {}) {
    // Implementation...
  }

  // ✅ DO: Match direct function parameters to core function
  export async function expandTaskDirect(args, log, context = {}) {
    // Extract only parameters that exist in the core function
    const taskId = parseInt(args.id, 10);
    const numSubtasks = args.num ? parseInt(args.num, 10) : undefined;
    const useResearch = args.research === true;
    const additionalContext = args.prompt || '';

    // Call core function with matched parameters
    const result = await expandTask(
      tasksPath,
      taskId,
      numSubtasks,
      useResearch,
      additionalContext,
      { mcpLog: log, session: context.session }
    );

    // Return result
    return { success: true, data: result, fromCache: false };
  }

  // ❌ DON'T: Use parameters that don't exist in the core function
  export async function expandTaskDirect(args, log, context = {}) {
    // DON'T extract parameters that don't exist in the core function!
    const force = args.force === true; // ❌ WRONG - 'force' doesn't exist in core function

    // DON'T pass non-existent parameters to core functions
    const result = await expandTask(
      tasksPath,
      args.id,
      args.num,
      args.research,
      args.prompt,
      force, // ❌ WRONG - this parameter doesn't exist in the core function
      { mcpLog: log }
    );
  }
  ```

- **Consistent File Path Handling**:
  - ✅ **DO**: Use consistent file naming conventions: `task_${id.toString().padStart(3, '0')}.txt`
  - ✅ **DO**: Use `path.join()` for composing file paths
  - ✅ **DO**: Use appropriate file extensions (.txt for tasks, .json for data)
  - ❌ **DON'T**: Hardcode path separators or inconsistent file extensions
  - **Example**: Creating file paths for tasks:
  ```javascript
  // ✅ DO: Use consistent file naming and path.join
  const taskFileName = `task_${id.toString().padStart(3, '0')}.txt`;
  const taskPath = path.join(tasksDir, taskFileName);
  ```

- **Error Handling and Reporting**:
  - ✅ **DO**: Use structured error objects with code and message properties
  - ✅ **DO**: Include clear error messages identifying the specific problem
  - ✅ **DO**: Handle both function-specific errors and potential file system errors
  - ✅ **DO**: Log errors at appropriate severity levels
  - **Example**: Structured error handling in core functions:
  ```javascript
  try {
    // ... core logic that may fail ...
  } catch (error) {
    // Log at an appropriate severity and surface a structured error object
    // with `code` and `message` properties (see bullets above).
    throw { code: 'SPECIFIC_ERROR_CODE', message: error.message };
  }
  ```

- **Silent Mode Implementation**:
  - ✅ **DO**: Import all silent mode utilities together:
  ```javascript
  import { enableSilentMode, disableSilentMode, isSilentMode } from '../../../../scripts/modules/utils.js';
  ```
  - ✅ **DO**: Always use `isSilentMode()` function to check global silent mode status, never reference global variables.
  - ✅ **DO**: Wrap core function calls **within direct functions** using `enableSilentMode()` and `disableSilentMode()` in a `try/finally` block if the core function might produce console output (like banners, spinners, direct `console.log`s) that isn't reliably controlled by an `outputFormat` parameter.
  ```javascript
  // Direct Function Example:
  try {
    // Prefer passing 'json' if the core function reliably handles it
    const result = await coreFunction(...args, 'json');
    // OR, if outputFormat is not enough/unreliable:
    // enableSilentMode(); // Enable *before* the call
    // const result = await coreFunction(...args);
    // disableSilentMode(); // Disable *after* the call (typically in finally)

    return { success: true, data: result };
  } catch (error) {
    log.error(`Error: ${error.message}`);
    return { success: false, error: { message: error.message } };
  } finally {
    // If you used enable/disable, ensure disable is called here
    // disableSilentMode();
  }
  ```
  - ✅ **DO**: Core functions themselves *should* ideally check `outputFormat === 'text'` before displaying UI elements (banners, spinners, boxes) and use internal logging (`log`/`report`) that respects silent mode. The `enable/disableSilentMode` wrapper in the direct function is a safety net.
  - ✅ **DO**: Handle mixed parameter/global silent mode correctly for functions accepting both (less common now, prefer `outputFormat`):
  ```javascript
  // Check both the passed parameter and global silent mode
  const isSilent = silentMode || (typeof silentMode === 'undefined' && isSilentMode());
  ```
  - ❌ **DON'T**: Forget to disable silent mode in a `finally` block if you enabled it.
  - ❌ **DON'T**: Access the global `silentMode` flag directly.

- **Debugging Strategy**:
  - ✅ **DO**: If an MCP tool fails with vague errors (e.g., JSON parsing issues like `Unexpected token ... is not valid JSON`), **try running the equivalent CLI command directly in the terminal** (e.g., `task-master expand --all`). CLI output often provides much more specific error messages (like missing function definitions or stack traces from the core logic) that pinpoint the root cause.
  - ❌ **DON'T**: Rely solely on MCP logs if the error is unclear; use the CLI as a complementary debugging tool for core logic issues.

```javascript
// 1. CORE LOGIC: Add function to appropriate module (example in task-manager.js)
```

@@ -10,8 +10,6 @@

**Note:** For interacting with Taskmaster programmatically or via integrated tools, using the **MCP tools is strongly recommended** due to better performance, structured data, and error handling. The CLI commands serve as a user-friendly alternative and fallback. See [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) for MCP implementation details and [`commands.mdc`](mdc:.cursor/rules/commands.mdc) for CLI implementation guidelines.

**Important:** Several MCP tools involve AI processing and are long-running operations that may take up to a minute to complete. When using these tools, always inform users that the operation is in progress and to wait patiently for results. The AI-powered tools include: `parse_prd`, `analyze_project_complexity`, `update_subtask`, `update_task`, `update`, `expand_all`, `expand_task`, and `add_task`.

---

## Initialization & Setup

@@ -36,8 +34,8 @@

* `skipInstall`: `Skip installing dependencies (default: false).` (CLI: `--skip-install`)
* `addAliases`: `Add shell aliases (tm, taskmaster) (default: false).` (CLI: `--aliases`)
* `yes`: `Skip prompts and use defaults/provided arguments (default: false).` (CLI: `-y, --yes`)
* **Usage:** Run this once at the beginning of a new project, typically via an integrated tool like Cursor. Operates on the current working directory of the MCP server.
* **Important:** Once complete, you *MUST* parse a PRD in order to generate tasks. There will be no task files until then. The next step after initializing should be to create a PRD using the example PRD in scripts/example_prd.txt.

### 2. Parse PRD (`parse_prd`)

@@ -51,7 +49,6 @@

* `force`: `Use this to allow Taskmaster to overwrite an existing 'tasks.json' without asking for confirmation.` (CLI: `-f, --force`)
* **Usage:** Useful for bootstrapping a project from an existing requirements document.
* **Notes:** Task Master will strictly adhere to any specific requirements mentioned in the PRD (libraries, database schemas, frameworks, tech stacks, etc.) while filling in any gaps where the PRD isn't fully specified. Tasks are designed to provide the most direct implementation path while avoiding over-engineering.
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress. If the user does not have a PRD, suggest discussing their idea and then use the example PRD in scripts/example_prd.txt as a template for creating the PRD based on their idea, for use with parse-prd.

---

@@ -102,7 +99,6 @@

* `priority`: `Set the priority for the new task ('high', 'medium', 'low'; default: 'medium').` (CLI: `--priority <priority>`)
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
* **Usage:** Quickly add newly identified tasks during development.
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.

### 7. Add Subtask (`add_subtask`)

@@ -131,8 +127,7 @@

* `prompt`: `Required. Explain the change or new context for Taskmaster to apply to the tasks (e.g., "We are now using React Query instead of Redux Toolkit for data fetching").` (CLI: `-p, --prompt <text>`)
* `research`: `Enable Taskmaster to use Perplexity AI for more informed updates based on external knowledge (requires PERPLEXITY_API_KEY).` (CLI: `-r, --research`)
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
* **Usage:** Handle significant implementation changes or pivots that affect multiple future tasks. Example CLI: `task-master update --from='18' --prompt='Switching to React Query.\nNeed to refactor data fetching...'`
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.

### 9. Update Task (`update_task`)

@@ -144,21 +139,19 @@

* `prompt`: `Required. Explain the specific changes or provide the new information Taskmaster should incorporate into this task.` (CLI: `-p, --prompt <text>`)
* `research`: `Enable Taskmaster to use Perplexity AI for more informed updates (requires PERPLEXITY_API_KEY).` (CLI: `-r, --research`)
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
* **Usage:** Refine a specific task based on new understanding or feedback. Example CLI: `task-master update-task --id='15' --prompt='Clarification: Use PostgreSQL instead of MySQL.\nUpdate schema details...'`
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.

### 10. Update Subtask (`update_subtask`)

* **MCP Tool:** `update_subtask`
* **CLI Command:** `task-master update-subtask [options]`
* **Description:** `Append timestamped notes or details to a specific Taskmaster subtask without overwriting existing content. Intended for iterative implementation logging.`
* **Key Parameters/Options:**
  * `id`: `Required. The specific ID of the Taskmaster subtask (e.g., '15.2') you want to add information to.` (CLI: `-i, --id <id>`)
  * `prompt`: `Required. Provide the information or notes Taskmaster should append to the subtask's details. Ensure this adds *new* information not already present.` (CLI: `-p, --prompt <text>`)
  * `research`: `Enable Taskmaster to use Perplexity AI for more informed updates (requires PERPLEXITY_API_KEY).` (CLI: `-r, --research`)
  * `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
* **Usage:** Add implementation notes, code snippets, or clarifications to a subtask during development. Before calling, review the subtask's current details to append only fresh insights, helping to build a detailed log of the implementation journey and avoid redundancy. Example CLI: `task-master update-subtask --id='15.2' --prompt='Discovered that the API requires header X.\nImplementation needs adjustment...'`
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
* **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.
|
|
||||||
|
|
||||||
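To illustrate what "append without overwriting" means in practice, a rough sketch (illustrative only; not the actual Taskmaster implementation, and the field names are assumed):

```javascript
// Illustrative sketch: append a timestamped note to a subtask's details
// while preserving whatever is already there. Not taken from the codebase.
function appendSubtaskNote(subtask, note) {
  const timestamp = new Date().toISOString();
  const existing = subtask.details ? `${subtask.details}\n\n` : '';
  return { ...subtask, details: `${existing}[${timestamp}] ${note}` };
}
```
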
### 11. Set Task Status (`set_task_status`)

@@ -200,7 +193,6 @@ This document provides a detailed reference for interacting with Taskmaster, cov
* `force`: `Use this to make Taskmaster replace existing subtasks with newly generated ones.` (CLI: `--force`)
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
* **Usage:** Generate a detailed implementation plan for a complex task before starting coding.
- * **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.

### 14. Expand All Tasks (`expand_all`)

@@ -214,7 +206,6 @@ This document provides a detailed reference for interacting with Taskmaster, cov
* `force`: `Make Taskmaster replace existing subtasks.` (CLI: `--force`)
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
* **Usage:** Useful after initial task generation or complexity analysis to break down multiple tasks at once.
- * **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.

### 15. Clear Subtasks (`clear_subtasks`)

@@ -287,67 +278,45 @@ This document provides a detailed reference for interacting with Taskmaster, cov

## Analysis & Reporting

- ### 21. Analyze Project Complexity (`analyze_project_complexity`)
+ ### 21. Analyze Complexity (`analyze_complexity`)

- * **MCP Tool:** `analyze_project_complexity`
+ * **MCP Tool:** `analyze_complexity`
* **CLI Command:** `task-master analyze-complexity [options]`
- * **Description:** `Have Taskmaster analyze your tasks to determine their complexity and suggest which ones need to be broken down further.`
+ * **Description:** `Let Taskmaster analyze the complexity of your tasks and generate a report with recommendations for which ones need breaking down.`
* **Key Parameters/Options:**
- * `output`: `Where to save the complexity analysis report (default: 'scripts/task-complexity-report.json').` (CLI: `-o, --output <file>`)
+ * `output`: `Where Taskmaster should save the JSON complexity analysis report (default: 'scripts/task-complexity-report.json').` (CLI: `-o, --output <file>`)
- * `threshold`: `The minimum complexity score (1-10) that should trigger a recommendation to expand a task.` (CLI: `-t, --threshold <number>`)
+ * `threshold`: `The minimum complexity score (1-10) for Taskmaster to recommend expanding a task.` (CLI: `-t, --threshold <number>`)
- * `research`: `Enable Perplexity AI for more accurate complexity analysis (requires PERPLEXITY_API_KEY).` (CLI: `-r, --research`)
+ * `research`: `Enable Taskmaster to use Perplexity AI for more informed complexity analysis (requires PERPLEXITY_API_KEY).` (CLI: `-r, --research`)
* `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
- * **Usage:** Used before breaking down tasks to identify which ones need the most attention.
+ * **Usage:** Identify which tasks are likely too large and need further breakdown before implementation.
- * **Important:** This MCP tool makes AI calls and can take up to a minute to complete. Please inform users to hang tight while the operation is in progress.

- ### 22. View Complexity Report (`complexity_report`)
+ ### 22. Complexity Report (`complexity_report`)

* **MCP Tool:** `complexity_report`
* **CLI Command:** `task-master complexity-report [options]`
- * **Description:** `Display the task complexity analysis report in a readable format.`
+ * **Description:** `Display the Taskmaster task complexity analysis report generated by 'analyze-complexity'.`
* **Key Parameters/Options:**
- * `file`: `Path to the complexity report (default: 'scripts/task-complexity-report.json').` (CLI: `-f, --file <file>`)
+ * `file`: `Path to the JSON complexity report file (default: 'scripts/task-complexity-report.json').` (CLI: `-f, --file <file>`)
- * **Usage:** Review and understand the complexity analysis results after running analyze-complexity.
+ * **Usage:** View the formatted results of the complexity analysis to guide task expansion.

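As a rough illustration of how the saved report might be consumed, a minimal sketch (the property names below are hypothetical and not taken from the Taskmaster source):

```javascript
// Hypothetical sketch of reading the report written by analyze-complexity.
// Field names are illustrative only.
import { readFileSync } from 'fs';

const report = JSON.parse(
  readFileSync('scripts/task-complexity-report.json', 'utf8')
);

// e.g. surface the tasks whose score meets a chosen expansion threshold
const needsBreakdown = (report.complexityAnalysis ?? []).filter(
  (task) => task.complexityScore >= 7
);
console.log(needsBreakdown.map((task) => task.taskId));
```
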
---

- ## File Management
+ ## File Generation

### 23. Generate Task Files (`generate`)

* **MCP Tool:** `generate`
* **CLI Command:** `task-master generate [options]`
- * **Description:** `Create or update individual Markdown files for each task based on your tasks.json.`
+ * **Description:** `Generate individual markdown files for each task and subtask defined in your Taskmaster 'tasks.json'.`
* **Key Parameters/Options:**
- * `output`: `The directory where Taskmaster should save the task files (default: in a 'tasks' directory).` (CLI: `-o, --output <directory>`)
+ * `file`: `Path to your Taskmaster 'tasks.json' file containing the task data (default relies on auto-detection).` (CLI: `-f, --file <file>`)
- * `file`: `Path to your Taskmaster 'tasks.json' file (default relies on auto-detection).` (CLI: `-f, --file <file>`)
+ * `output`: `The directory where Taskmaster should save the generated markdown task files (default: 'tasks').` (CLI: `-o, --output <dir>`)
- * **Usage:** Run this after making changes to tasks.json to keep individual task files up to date.
+ * **Usage:** Create/update the individual `.md` files in the `tasks/` directory, useful for tracking changes in git or viewing tasks individually.

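Conceptually, the command walks `tasks.json` and writes one markdown file per task. A rough sketch of that idea (illustrative only; not the actual implementation, and the task fields shown are assumed):

```javascript
// Illustrative sketch: one markdown file per task, derived from tasks.json.
import fs from 'fs';
import path from 'path';

const { tasks } = JSON.parse(fs.readFileSync('tasks/tasks.json', 'utf8'));

for (const task of tasks) {
  const id = String(task.id).padStart(3, '0');
  const body = `# Task ${task.id}: ${task.title}\n\nStatus: ${task.status}\n\n${task.details ?? ''}\n`;
  fs.writeFileSync(path.join('tasks', `task_${id}.md`), body);
}
```
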
---

- ## Environment Variables Configuration
+ ## Configuration & Metadata

- Taskmaster's behavior can be customized via environment variables. These affect both CLI and MCP server operation:
+ - **Environment Variables**: Taskmaster relies on environment variables for configuration (API keys, model preferences, default settings). See [`dev_workflow.mdc`](mdc:.cursor/rules/dev_workflow.mdc) or the project README for a list.
+ - **`tasks.json`**: The core data file containing the array of tasks and their details. See [`tasks.mdc`](mdc:.cursor/rules/tasks.mdc) for details.
- * **ANTHROPIC_API_KEY** (Required): Your Anthropic API key for Claude.
+ - **`task_xxx.md` files**: Individual markdown files generated by the `generate` command/tool, reflecting the content of `tasks.json`.
- * **MODEL**: Claude model to use (default: `claude-3-opus-20240229`).
- * **MAX_TOKENS**: Maximum tokens for AI responses (default: 8192).
- * **TEMPERATURE**: Temperature for AI model responses (default: 0.7).
- * **DEBUG**: Enable debug logging (`true`/`false`, default: `false`).
- * **LOG_LEVEL**: Console output level (`debug`, `info`, `warn`, `error`, default: `info`).
- * **DEFAULT_SUBTASKS**: Default number of subtasks for `expand` (default: 5).
- * **DEFAULT_PRIORITY**: Default priority for new tasks (default: `medium`).
- * **PROJECT_NAME**: Project name used in metadata.
- * **PROJECT_VERSION**: Project version used in metadata.
- * **PERPLEXITY_API_KEY**: API key for Perplexity AI (for `--research` flags).
- * **PERPLEXITY_MODEL**: Perplexity model to use (default: `sonar-medium-online`).

- Set these in your `.env` file in the project root or in your environment before running Taskmaster.

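For illustration only, a minimal sketch of how these variables could be read with the defaults listed above (the object below is hypothetical and not taken from the Taskmaster codebase):

```javascript
// Hypothetical sketch: reading the variables above with their documented defaults.
const config = {
  model: process.env.MODEL || 'claude-3-opus-20240229',
  maxTokens: parseInt(process.env.MAX_TOKENS || '8192', 10),
  temperature: parseFloat(process.env.TEMPERATURE || '0.7'),
  debug: process.env.DEBUG === 'true',
  logLevel: process.env.LOG_LEVEL || 'info',
  defaultSubtasks: parseInt(process.env.DEFAULT_SUBTASKS || '5', 10),
  defaultPriority: process.env.DEFAULT_PRIORITY || 'medium',
  projectName: process.env.PROJECT_NAME,
  perplexityModel: process.env.PERPLEXITY_MODEL || 'sonar-medium-online'
};
```
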
- ---

- For implementation details:
- * CLI commands: See [`commands.mdc`](mdc:.cursor/rules/commands.mdc)
- * MCP server: See [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc)
- * Task structure: See [`tasks.mdc`](mdc:.cursor/rules/tasks.mdc)
- * Workflow: See [`dev_workflow.mdc`](mdc:.cursor/rules/dev_workflow.mdc)

@@ -5,8 +5,6 @@ globs: "**/*.test.js,tests/**/*"

# Testing Guidelines for Task Master CLI

- *Note:* Never use asynchronous operations in tests. Always mock tests properly, based on the way the tested functions are defined and used. Do not arbitrarily create tests; base them on the low-level details and execution of the underlying code being tested.

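As a minimal illustration of that principle (all names below are hypothetical, and every dependency is mocked synchronously):

```javascript
// Minimal illustration: the dependency is mocked synchronously, so the test
// exercises only the logic under test and never performs real async work.
const mockReadJSON = jest.fn(() => ({ tasks: [] }));

const listTasks = (tasksPath) => {
  const data = mockReadJSON(tasksPath); // stands in for the real file read
  return { tasks: data.tasks };
};

test('returns an empty list when tasks.json has no tasks', () => {
  const result = listTasks('tasks/tasks.json');
  expect(mockReadJSON).toHaveBeenCalledWith('tasks/tasks.json');
  expect(result.tasks).toHaveLength(0);
});
```
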
## Test Organization Structure

- **Unit Tests** (See [`architecture.mdc`](mdc:.cursor/rules/architecture.mdc) for module breakdown)

@@ -90,122 +88,6 @@ describe('Feature or Function Name', () => {
});
```
|
|
||||||
## Commander.js Command Testing Best Practices
|
|
||||||
|
|
||||||
When testing CLI commands built with Commander.js, several special considerations must be made to avoid common pitfalls:
|
|
||||||
|
|
||||||
- **Direct Action Handler Testing**
|
|
||||||
- ✅ **DO**: Test the command action handlers directly rather than trying to mock the entire Commander.js chain
|
|
||||||
- ✅ **DO**: Create simplified test-specific implementations of command handlers that match the original behavior
|
|
||||||
- ✅ **DO**: Explicitly handle all options, including defaults and shorthand flags (e.g., `-p` for `--prompt`)
|
|
||||||
- ✅ **DO**: Include null/undefined checks in test implementations for parameters that might be optional
|
|
||||||
- ✅ **DO**: Use fixtures from `tests/fixtures/` for consistent sample data across tests
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// ✅ DO: Create a simplified test version of the command handler
|
|
||||||
const testAddTaskAction = async (options) => {
|
|
||||||
options = options || {}; // Ensure options aren't undefined
|
|
||||||
|
|
||||||
// Validate parameters
|
|
||||||
const isManualCreation = options.title && options.description;
|
|
||||||
const prompt = options.prompt || options.p; // Handle shorthand flags
|
|
||||||
|
|
||||||
if (!prompt && !isManualCreation) {
|
|
||||||
throw new Error('Expected error message');
|
|
||||||
}
|
|
||||||
|
|
||||||
// Call the mocked task manager
|
|
||||||
return mockTaskManager.addTask(/* parameters */);
|
|
||||||
};
|
|
||||||
|
|
||||||
test('should handle required parameters correctly', async () => {
|
|
||||||
// Call the test implementation directly
|
|
||||||
await expect(async () => {
|
|
||||||
await testAddTaskAction({ file: 'tasks.json' });
|
|
||||||
}).rejects.toThrow('Expected error message');
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
- **Commander Chain Mocking (If Necessary)**
|
|
||||||
- ✅ **DO**: Mock ALL chainable methods (`option`, `argument`, `action`, `on`, etc.)
|
|
||||||
- ✅ **DO**: Return `this` (or the mock object) from all chainable method mocks
|
|
||||||
- ✅ **DO**: Remember to mock not only the initial object but also all objects returned by methods
|
|
||||||
- ✅ **DO**: Implement a mechanism to capture the action handler for direct testing
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// If you must mock the Commander.js chain:
|
|
||||||
const mockCommand = {
|
|
||||||
command: jest.fn().mockReturnThis(),
|
|
||||||
description: jest.fn().mockReturnThis(),
|
|
||||||
option: jest.fn().mockReturnThis(),
|
|
||||||
argument: jest.fn().mockReturnThis(), // Don't forget this one
|
|
||||||
action: jest.fn(fn => {
|
|
||||||
actionHandler = fn; // Capture the handler for testing
|
|
||||||
return mockCommand;
|
|
||||||
}),
|
|
||||||
on: jest.fn().mockReturnThis() // Don't forget this one
|
|
||||||
};
|
|
||||||
```
|
|
||||||
|
|
||||||
- **Parameter Handling**
|
|
||||||
- ✅ **DO**: Check for both main flag and shorthand flags (e.g., `prompt` and `p`)
|
|
||||||
- ✅ **DO**: Handle parameters like Commander would (comma-separated lists, etc.)
|
|
||||||
- ✅ **DO**: Set proper default values as defined in the command
|
|
||||||
- ✅ **DO**: Validate that required parameters are actually required in tests
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// Parse dependencies like Commander would
|
|
||||||
const dependencies = options.dependencies
|
|
||||||
? options.dependencies.split(',').map(id => id.trim())
|
|
||||||
: [];
|
|
||||||
```
|
|
||||||
|
|
||||||
- **Environment and Session Handling**
|
|
||||||
- ✅ **DO**: Properly mock session objects when required by functions
|
|
||||||
- ✅ **DO**: Reset environment variables between tests if modified
|
|
||||||
- ✅ **DO**: Use a consistent pattern for environment-dependent tests
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// Session parameter mock pattern
|
|
||||||
const sessionMock = { session: process.env };
|
|
||||||
|
|
||||||
// In test:
|
|
||||||
expect(mockAddTask).toHaveBeenCalledWith(
|
|
||||||
expect.any(String),
|
|
||||||
'Test prompt',
|
|
||||||
[],
|
|
||||||
'medium',
|
|
||||||
sessionMock,
|
|
||||||
false,
|
|
||||||
null,
|
|
||||||
null
|
|
||||||
);
|
|
||||||
```
|
|
||||||
|
|
||||||
- **Common Pitfalls to Avoid**
|
|
||||||
- ❌ **DON'T**: Try to use the real action implementation without proper mocking
|
|
||||||
- ❌ **DON'T**: Mock Commander partially - either mock it completely or test the action directly
|
|
||||||
- ❌ **DON'T**: Forget to handle optional parameters that may be undefined
|
|
||||||
- ❌ **DON'T**: Neglect to test shorthand flag functionality (e.g., `-p`, `-r`)
|
|
||||||
- ❌ **DON'T**: Create circular dependencies in your test mocks
|
|
||||||
- ❌ **DON'T**: Access variables before initialization in your test implementations
|
|
||||||
- ❌ **DON'T**: Include actual command execution in unit tests
|
|
||||||
- ❌ **DON'T**: Overwrite the same file path in multiple tests
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// ❌ DON'T: Create circular references in mocks
|
|
||||||
const badMock = {
|
|
||||||
method: jest.fn().mockImplementation(() => badMock.method())
|
|
||||||
};
|
|
||||||
|
|
||||||
// ❌ DON'T: Access uninitialized variables
|
|
||||||
const badImplementation = () => {
|
|
||||||
const result = uninitialized;
|
|
||||||
let uninitialized = 'value';
|
|
||||||
return result;
|
|
||||||
};
|
|
||||||
```
|
|
||||||
|
|
||||||
## Jest Module Mocking Best Practices

- **Mock Hoisting Behavior**
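For instance, because Jest hoists `jest.mock` calls above import statements, the factory should be self-contained rather than referencing variables declared later in the file. A minimal sketch, mirroring the Anthropic mock used elsewhere in these guidelines:

```javascript
// jest.mock() calls are hoisted above imports, so the factory must not rely
// on ordinary top-level variables declared later in the file. Defining the
// mock behaviour inside the factory avoids the problem entirely.
jest.mock('@anthropic-ai/sdk', () => ({
  Anthropic: jest.fn().mockImplementation(() => ({
    messages: {
      create: jest.fn().mockResolvedValue({
        content: [{ type: 'text', text: 'Mocked AI response' }]
      })
    }
  }))
}));
```
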
@@ -670,102 +552,6 @@ npm test -- -t "pattern to match"
});
```
|
|
||||||
## Testing AI Service Integrations
|
|
||||||
|
|
||||||
- **DO NOT import real AI service clients**
|
|
||||||
- ❌ DON'T: Import actual AI clients from their libraries
|
|
||||||
- ✅ DO: Create fully mocked versions that return predictable responses
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// ❌ DON'T: Import and instantiate real AI clients
|
|
||||||
import { Anthropic } from '@anthropic-ai/sdk';
|
|
||||||
const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
|
|
||||||
|
|
||||||
// ✅ DO: Mock the entire module with controlled behavior
|
|
||||||
jest.mock('@anthropic-ai/sdk', () => ({
|
|
||||||
Anthropic: jest.fn().mockImplementation(() => ({
|
|
||||||
messages: {
|
|
||||||
create: jest.fn().mockResolvedValue({
|
|
||||||
content: [{ type: 'text', text: 'Mocked AI response' }]
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}))
|
|
||||||
}));
|
|
||||||
```
|
|
||||||
|
|
||||||
- **DO NOT rely on environment variables for API keys**
|
|
||||||
- ❌ DON'T: Assume environment variables are set in tests
|
|
||||||
- ✅ DO: Set mock environment variables in test setup
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// In tests/setup.js or at the top of test file
|
|
||||||
process.env.ANTHROPIC_API_KEY = 'test-mock-api-key-for-tests';
|
|
||||||
process.env.PERPLEXITY_API_KEY = 'test-mock-perplexity-key-for-tests';
|
|
||||||
```
|
|
||||||
|
|
||||||
- **DO NOT use real AI client initialization logic**
|
|
||||||
- ❌ DON'T: Use code that attempts to initialize or validate real AI clients
|
|
||||||
- ✅ DO: Create test-specific paths that bypass client initialization
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// ❌ DON'T: Test functions that require valid AI client initialization
|
|
||||||
// This will fail without proper API keys or network access
|
|
||||||
test('should use AI client', async () => {
|
|
||||||
const result = await functionThatInitializesAIClient();
|
|
||||||
expect(result).toBeDefined();
|
|
||||||
});
|
|
||||||
|
|
||||||
// ✅ DO: Test with bypassed initialization or manual task paths
|
|
||||||
test('should handle manual task creation without AI', () => {
|
|
||||||
// Using a path that doesn't require AI client initialization
|
|
||||||
const result = addTaskDirect({
|
|
||||||
title: 'Manual Task',
|
|
||||||
description: 'Test Description'
|
|
||||||
}, mockLogger);
|
|
||||||
|
|
||||||
expect(result.success).toBe(true);
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
## Testing Asynchronous Code
|
|
||||||
|
|
||||||
- **DO NOT rely on asynchronous operations in tests**
|
|
||||||
- ❌ DON'T: Use real async/await or Promise resolution in tests
|
|
||||||
- ✅ DO: Make all mocks return synchronous values when possible
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// ❌ DON'T: Use real async functions that might fail unpredictably
|
|
||||||
test('should handle async operation', async () => {
|
|
||||||
const result = await realAsyncFunction(); // Can time out or fail for external reasons
|
|
||||||
expect(result).toBe(expectedValue);
|
|
||||||
});
|
|
||||||
|
|
||||||
// ✅ DO: Make async operations synchronous in tests
|
|
||||||
test('should handle operation', () => {
|
|
||||||
mockAsyncFunction.mockReturnValue({ success: true, data: 'test' });
|
|
||||||
const result = functionUnderTest();
|
|
||||||
expect(result).toEqual({ success: true, data: 'test' });
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
- **DO NOT test exact error messages**
|
|
||||||
- ❌ DON'T: Assert on exact error message text that might change
|
|
||||||
- ✅ DO: Test for error presence and general properties
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// ❌ DON'T: Test for exact error message text
|
|
||||||
expect(result.error).toBe('Could not connect to API: Network error');
|
|
||||||
|
|
||||||
// ✅ DO: Test for general error properties or message patterns
|
|
||||||
expect(result.success).toBe(false);
|
|
||||||
expect(result.error).toContain('Could not connect');
|
|
||||||
// Or even better:
|
|
||||||
expect(result).toMatchObject({
|
|
||||||
success: false,
|
|
||||||
error: expect.stringContaining('connect')
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
## Reliable Testing Techniques
|
## Reliable Testing Techniques
|
||||||
|
|
||||||
- **Create Simplified Test Functions**
|
- **Create Simplified Test Functions**
|
||||||
@@ -778,125 +564,99 @@ npm test -- -t "pattern to match"
const setTaskStatus = async (taskId, newStatus) => {
const tasksPath = 'tasks/tasks.json';
const data = await readJSON(tasksPath);
- // [implementation]
+ // Update task status logic
await writeJSON(tasksPath, data);
- return { success: true };
+ return data;
};

- // Test-friendly version (easier to test)
+ // Test-friendly simplified function (easy to test)
- const updateTaskStatus = (tasks, taskId, newStatus) => {
+ const testSetTaskStatus = (tasksData, taskIdInput, newStatus) => {
- // Pure logic without side effects
+ // Same core logic without file operations
- const updatedTasks = [...tasks];
+ // Update task status logic on provided tasksData object
- const taskIndex = findTaskById(updatedTasks, taskId);
+ return tasksData; // Return updated data for assertions
- if (taskIndex === -1) return { success: false, error: 'Task not found' };
- updatedTasks[taskIndex].status = newStatus;
- return { success: true, tasks: updatedTasks };
};
```

|
- **Avoid Real File System Operations**
|
||||||
|
- Never write to real files during tests
|
||||||
|
- Create test-specific versions of file operation functions
|
||||||
|
- Mock all file system operations including read, write, exists, etc.
|
||||||
|
- Verify function behavior using the in-memory data structures
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Mock file operations
|
||||||
|
const mockReadJSON = jest.fn();
|
||||||
|
const mockWriteJSON = jest.fn();
|
||||||
|
|
||||||
|
jest.mock('../../scripts/modules/utils.js', () => ({
|
||||||
|
readJSON: mockReadJSON,
|
||||||
|
writeJSON: mockWriteJSON,
|
||||||
|
}));
|
||||||
|
|
||||||
|
test('should update task status correctly', () => {
|
||||||
|
// Setup mock data
|
||||||
|
const testData = JSON.parse(JSON.stringify(sampleTasks));
|
||||||
|
mockReadJSON.mockReturnValue(testData);
|
||||||
|
|
||||||
|
// Call the function that would normally modify files
|
||||||
|
const result = testSetTaskStatus(testData, '1', 'done');
|
||||||
|
|
||||||
|
// Assert on the in-memory data structure
|
||||||
|
expect(result.tasks[0].status).toBe('done');
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
- **Data Isolation Between Tests**
|
||||||
|
- Always create fresh copies of test data for each test
|
||||||
|
- Use `JSON.parse(JSON.stringify(original))` for deep cloning
|
||||||
|
- Reset all mocks before each test with `jest.clearAllMocks()`
|
||||||
|
- Avoid state that persists between tests
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
beforeEach(() => {
|
||||||
|
jest.clearAllMocks();
|
||||||
|
// Deep clone the test data
|
||||||
|
testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
- **Test All Path Variations**
|
||||||
|
- Regular tasks and subtasks
|
||||||
|
- Single items and multiple items
|
||||||
|
- Success paths and error paths
|
||||||
|
- Edge cases (empty data, invalid inputs, etc.)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Multiple test cases covering different scenarios
|
||||||
|
test('should update regular task status', () => {
|
||||||
|
/* test implementation */
|
||||||
|
});
|
||||||
|
|
||||||
|
test('should update subtask status', () => {
|
||||||
|
/* test implementation */
|
||||||
|
});
|
||||||
|
|
||||||
|
test('should update multiple tasks when given comma-separated IDs', () => {
|
||||||
|
/* test implementation */
|
||||||
|
});
|
||||||
|
|
||||||
|
test('should throw error for non-existent task ID', () => {
|
||||||
|
/* test implementation */
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
- **Stabilize Tests With Predictable Input/Output**
|
||||||
|
- Use consistent, predictable test fixtures
|
||||||
|
- Avoid random values or time-dependent data
|
||||||
|
- Make tests deterministic for reliable CI/CD
|
||||||
|
- Control all variables that might affect test outcomes
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Use a specific known date instead of current date
|
||||||
|
const fixedDate = new Date('2023-01-01T12:00:00Z');
|
||||||
|
jest.spyOn(global, 'Date').mockImplementation(() => fixedDate);
|
||||||
|
```
|
||||||
|
|
||||||
See [tests/README.md](mdc:tests/README.md) for more details on the testing approach.
|
See [tests/README.md](mdc:tests/README.md) for more details on the testing approach.
|
||||||
|
|
||||||
Refer to [jest.config.js](mdc:jest.config.js) for Jest configuration options.
|
Refer to [jest.config.js](mdc:jest.config.js) for Jest configuration options.
|
||||||
|
|
||||||
## Variable Hoisting and Module Initialization Issues
|
|
||||||
|
|
||||||
When testing ES modules or working with complex module imports, you may encounter variable hoisting and initialization issues. These can be particularly tricky to debug and often appear as "Cannot access 'X' before initialization" errors.
|
|
||||||
|
|
||||||
- **Understanding Module Initialization Order**
|
|
||||||
- ✅ **DO**: Declare and initialize global variables at the top of modules
|
|
||||||
- ✅ **DO**: Use proper function declarations to avoid hoisting issues
|
|
||||||
- ✅ **DO**: Initialize variables before they are referenced, especially in imported modules
|
|
||||||
- ✅ **DO**: Be aware that imports are hoisted to the top of the file
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// ✅ DO: Define global state variables at the top of the module
|
|
||||||
let silentMode = false; // Declare and initialize first
|
|
||||||
|
|
||||||
const CONFIG = { /* configuration */ };
|
|
||||||
|
|
||||||
function isSilentMode() {
|
|
||||||
return silentMode; // Reference variable after it's initialized
|
|
||||||
}
|
|
||||||
|
|
||||||
function log(level, message) {
|
|
||||||
if (isSilentMode()) return; // Use the function instead of accessing variable directly
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
- **Testing Modules with Initialization-Dependent Functions**
|
|
||||||
- ✅ **DO**: Create test-specific implementations that initialize all variables correctly
|
|
||||||
- ✅ **DO**: Use factory functions in mocks to ensure proper initialization order
|
|
||||||
- ✅ **DO**: Be careful with how you mock or stub functions that depend on module state
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// ✅ DO: Test-specific implementation that avoids initialization issues
|
|
||||||
const testLog = (level, ...args) => {
|
|
||||||
// Local implementation with proper initialization
|
|
||||||
const isSilent = false; // Explicit initialization
|
|
||||||
if (isSilent) return;
|
|
||||||
// Test implementation...
|
|
||||||
};
|
|
||||||
```
|
|
||||||
|
|
||||||
- **Common Hoisting-Related Errors to Avoid**
|
|
||||||
- ❌ **DON'T**: Reference variables before their declaration in module scope
|
|
||||||
- ❌ **DON'T**: Create circular dependencies between modules
|
|
||||||
- ❌ **DON'T**: Rely on variable initialization order across module boundaries
|
|
||||||
- ❌ **DON'T**: Define functions that use hoisted variables before they're initialized
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// ❌ DON'T: Create reference-before-initialization patterns
|
|
||||||
function badFunction() {
|
|
||||||
if (silentMode) { /* ... */ } // ReferenceError if silentMode is declared later
|
|
||||||
}
|
|
||||||
|
|
||||||
let silentMode = false;
|
|
||||||
|
|
||||||
// ❌ DON'T: Create cross-module references that depend on initialization order
|
|
||||||
// module-a.js
|
|
||||||
import { getSetting } from './module-b.js';
|
|
||||||
export const config = { value: getSetting() };
|
|
||||||
|
|
||||||
// module-b.js
|
|
||||||
import { config } from './module-a.js';
|
|
||||||
export function getSetting() {
|
|
||||||
return config.value; // Circular dependency causing initialization issues
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
- **Dynamic Imports as a Solution**
|
|
||||||
- ✅ **DO**: Use dynamic imports (`import()`) to avoid initialization order issues
|
|
||||||
- ✅ **DO**: Structure modules to avoid circular dependencies that cause initialization issues
|
|
||||||
- ✅ **DO**: Consider factory functions for modules with complex state
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// ✅ DO: Use dynamic imports to avoid initialization issues
|
|
||||||
async function getTaskManager() {
|
|
||||||
return import('./task-manager.js');
|
|
||||||
}
|
|
||||||
|
|
||||||
async function someFunction() {
|
|
||||||
const taskManager = await getTaskManager();
|
|
||||||
return taskManager.someMethod();
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
- **Testing Approach for Modules with Initialization Issues**
|
|
||||||
- ✅ **DO**: Create self-contained test implementations rather than using real implementations
|
|
||||||
- ✅ **DO**: Mock dependencies at module boundaries instead of trying to mock deep dependencies
|
|
||||||
- ✅ **DO**: Isolate module-specific state in tests
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// ✅ DO: Create isolated test implementation instead of reusing module code
|
|
||||||
test('should log messages when not in silent mode', () => {
|
|
||||||
// Local test implementation instead of importing from module
|
|
||||||
const testLog = (level, message) => {
|
|
||||||
if (false) return; // Always non-silent for this test
|
|
||||||
mockConsole(level, message);
|
|
||||||
};
|
|
||||||
|
|
||||||
testLog('info', 'test message');
|
|
||||||
expect(mockConsole).toHaveBeenCalledWith('info', 'test message');
|
|
||||||
});
|
|
||||||
```
|
|
||||||
@@ -109,29 +109,6 @@ alwaysApply: false
|
|||||||
- ✅ DO: Use appropriate icons for different log levels
|
- ✅ DO: Use appropriate icons for different log levels
|
||||||
- ✅ DO: Respect the configured log level
|
- ✅ DO: Respect the configured log level
|
||||||
- ❌ DON'T: Add direct console.log calls outside the logging utility
|
- ❌ DON'T: Add direct console.log calls outside the logging utility
|
||||||
- **Note on Passed Loggers**: When a logger object (like the FastMCP `log` object) is passed *as a parameter* (e.g., as `mcpLog`) into core Task Master functions, the receiving function often expects specific methods (`.info`, `.warn`, `.error`, etc.) to be directly callable on that object (e.g., `mcpLog[level](...)`). If the passed logger doesn't have this exact structure, a wrapper object may be needed. See the **Handling Logging Context (`mcpLog`)** section in [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) for the standard pattern used in direct functions.
|
|
||||||
|
|
||||||
- **Logger Wrapper Pattern**:
|
|
||||||
- ✅ DO: Use the logger wrapper pattern when passing loggers to prevent `mcpLog[level] is not a function` errors:
|
|
||||||
```javascript
|
|
||||||
// Standard logWrapper pattern to wrap FastMCP's log object
|
|
||||||
const logWrapper = {
|
|
||||||
info: (message, ...args) => log.info(message, ...args),
|
|
||||||
warn: (message, ...args) => log.warn(message, ...args),
|
|
||||||
error: (message, ...args) => log.error(message, ...args),
|
|
||||||
debug: (message, ...args) => log.debug && log.debug(message, ...args),
|
|
||||||
success: (message, ...args) => log.info(message, ...args) // Map success to info
|
|
||||||
};
|
|
||||||
|
|
||||||
// Pass this wrapper as mcpLog to ensure consistent method availability
|
|
||||||
// This also ensures output format is set to 'json' in many core functions
|
|
||||||
const options = { mcpLog: logWrapper, session };
|
|
||||||
```
|
|
||||||
- ✅ DO: Implement this pattern in any direct function that calls core functions expecting `mcpLog`
|
|
||||||
- ✅ DO: Use this solution in conjunction with silent mode for complete output control
|
|
||||||
- ❌ DON'T: Pass the FastMCP `log` object directly as `mcpLog` to core functions
|
|
||||||
- **Important**: This pattern has successfully fixed multiple issues in MCP tools (e.g., `update-task`, `update-subtask`) where using or omitting `mcpLog` incorrectly led to runtime errors or JSON parsing failures.
|
|
||||||
- For complete implementation details, see the **Handling Logging Context (`mcpLog`)** section in [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc).
|
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
// ✅ DO: Implement a proper logging utility
|
// ✅ DO: Implement a proper logging utility
|
||||||
@@ -158,107 +135,6 @@ alwaysApply: false
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
## Silent Mode Utilities (in `scripts/modules/utils.js`)
|
|
||||||
|
|
||||||
- **Silent Mode Control**:
|
|
||||||
- ✅ DO: Use the exported silent mode functions rather than accessing global variables
|
|
||||||
- ✅ DO: Always use `isSilentMode()` to check the current silent mode state
|
|
||||||
- ✅ DO: Ensure silent mode is disabled in a `finally` block to prevent it from staying enabled
|
|
||||||
- ❌ DON'T: Access the global `silentMode` variable directly
|
|
||||||
- ❌ DON'T: Forget to disable silent mode after enabling it
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
// ✅ DO: Use the silent mode control functions properly
|
|
||||||
|
|
||||||
// Example of proper implementation in utils.js:
|
|
||||||
|
|
||||||
// Global silent mode flag (private to the module)
|
|
||||||
let silentMode = false;
|
|
||||||
|
|
||||||
// Enable silent mode
|
|
||||||
function enableSilentMode() {
|
|
||||||
silentMode = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Disable silent mode
|
|
||||||
function disableSilentMode() {
|
|
||||||
silentMode = false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if silent mode is enabled
|
|
||||||
function isSilentMode() {
|
|
||||||
return silentMode;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example of proper usage in another module:
|
|
||||||
import { enableSilentMode, disableSilentMode, isSilentMode } from './utils.js';
|
|
||||||
|
|
||||||
// Check current status
|
|
||||||
if (!isSilentMode()) {
|
|
||||||
console.log('Silent mode is not enabled');
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use try/finally pattern to ensure silent mode is disabled
|
|
||||||
try {
|
|
||||||
enableSilentMode();
|
|
||||||
// Do something that should suppress console output
|
|
||||||
performOperation();
|
|
||||||
} finally {
|
|
||||||
disableSilentMode();
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
- **Integration with Logging**:
|
|
||||||
- ✅ DO: Make the `log` function respect silent mode
|
|
||||||
```javascript
|
|
||||||
function log(level, ...args) {
|
|
||||||
// Skip logging if silent mode is enabled
|
|
||||||
if (isSilentMode()) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rest of logging logic...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
- **Common Patterns for Silent Mode**:
|
|
||||||
- ✅ DO: In **direct functions** (`mcp-server/src/core/direct-functions/*`) that call **core functions** (`scripts/modules/*`), ensure console output from the core function is suppressed to avoid breaking MCP JSON responses.
|
|
||||||
- **Preferred Method**: Update the core function to accept an `outputFormat` parameter (e.g., `outputFormat = 'text'`) and make it check `outputFormat === 'text'` before displaying any UI elements (banners, spinners, boxes, direct `console.log`s). Pass `'json'` from the direct function.
|
|
||||||
- **Necessary Fallback/Guarantee**: If the core function *cannot* be modified or its output suppression via `outputFormat` is unreliable, **wrap the core function call within the direct function** using `enableSilentMode()` and `disableSilentMode()` in a `try/finally` block. This acts as a safety net.
|
|
||||||
```javascript
|
|
||||||
// Example in a direct function
|
|
||||||
export async function someOperationDirect(args, log) {
|
|
||||||
let result;
|
|
||||||
const tasksPath = findTasksJsonPath(args, log); // Get path first
|
|
||||||
|
|
||||||
// Option 1: Core function handles 'json' format (Preferred)
|
|
||||||
try {
|
|
||||||
result = await coreFunction(tasksPath, ...otherArgs, 'json'); // Pass 'json'
|
|
||||||
return { success: true, data: result, fromCache: false };
|
|
||||||
} catch (error) {
|
|
||||||
// Handle error...
|
|
||||||
}
|
|
||||||
|
|
||||||
// Option 2: Core function output unreliable (Fallback/Guarantee)
|
|
||||||
try {
|
|
||||||
enableSilentMode(); // Enable before call
|
|
||||||
result = await coreFunction(tasksPath, ...otherArgs); // Call without format param
|
|
||||||
} catch (error) {
|
|
||||||
// Handle error...
|
|
||||||
log.error(`Failed: ${error.message}`);
|
|
||||||
return { success: false, error: { /* ... */ } };
|
|
||||||
} finally {
|
|
||||||
disableSilentMode(); // ALWAYS disable in finally
|
|
||||||
}
|
|
||||||
return { success: true, data: result, fromCache: false }; // Assuming success if no error caught
|
|
||||||
}
|
|
||||||
```
|
|
||||||
- ✅ DO: For functions that accept a silent mode parameter but also need to check global state (less common):
|
|
||||||
```javascript
|
|
||||||
// Check both the passed parameter and global silent mode
|
|
||||||
const isSilent = options.silentMode || (typeof options.silentMode === 'undefined' && isSilentMode());
|
|
||||||
```
|
|
||||||
|
|
||||||
## File Operations (in `scripts/modules/utils.js`)
|
## File Operations (in `scripts/modules/utils.js`)
|
||||||
|
|
||||||
- **Error Handling**:
|
- **Error Handling**:
|
||||||
|
|||||||
22 .env.example
@@ -1,20 +1,20 @@
# API Keys (Required)
ANTHROPIC_API_KEY=your_anthropic_api_key_here # Format: sk-ant-api03-...
PERPLEXITY_API_KEY=your_perplexity_api_key_here # Format: pplx-...

# Model Configuration
MODEL=claude-3-7-sonnet-20250219 # Recommended models: claude-3-7-sonnet-20250219, claude-3-opus-20240229
PERPLEXITY_MODEL=sonar-pro # Perplexity model for research-backed subtasks
MAX_TOKENS=64000 # Maximum tokens for model responses
- TEMPERATURE=0.2 # Temperature for model responses (0.0-1.0)
+ TEMPERATURE=0.4 # Temperature for model responses (0.0-1.0)

# Logging Configuration
DEBUG=false # Enable debug logging (true/false)
LOG_LEVEL=info # Log level (debug, info, warn, error)

# Task Generation Settings
- DEFAULT_SUBTASKS=5 # Default number of subtasks when expanding
+ DEFAULT_SUBTASKS=4 # Default number of subtasks when expanding
DEFAULT_PRIORITY=medium # Default priority for generated tasks (high, medium, low)

# Project Metadata (Optional)
PROJECT_NAME=Your Project Name # Override default project name in tasks.json
39
.github/ISSUE_TEMPLATE/bug_report.md
vendored
39
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -1,39 +0,0 @@
|
|||||||
---
|
|
||||||
name: Bug report
|
|
||||||
about: Create a report to help us improve
|
|
||||||
title: 'bug: '
|
|
||||||
labels: bug
|
|
||||||
assignees: ''
|
|
||||||
---
|
|
||||||
|
|
||||||
### Description
|
|
||||||
|
|
||||||
Detailed description of the problem, including steps to reproduce the issue.
|
|
||||||
|
|
||||||
### Steps to Reproduce
|
|
||||||
|
|
||||||
1. Step-by-step instructions to reproduce the issue
|
|
||||||
2. Include command examples or UI interactions
|
|
||||||
|
|
||||||
### Expected Behavior
|
|
||||||
|
|
||||||
Describe clearly what the expected outcome or behavior should be.
|
|
||||||
|
|
||||||
### Actual Behavior
|
|
||||||
|
|
||||||
Describe clearly what the actual outcome or behavior is.
|
|
||||||
|
|
||||||
### Screenshots or Logs
|
|
||||||
|
|
||||||
Provide screenshots, logs, or error messages if applicable.
|
|
||||||
|
|
||||||
### Environment
|
|
||||||
|
|
||||||
- Task Master version:
|
|
||||||
- Node.js version:
|
|
||||||
- Operating system:
|
|
||||||
- IDE (if applicable):
|
|
||||||
|
|
||||||
### Additional Context
|
|
||||||
|
|
||||||
Any additional information or context that might help diagnose the issue.
|
|
||||||
@@ -1,51 +0,0 @@
|
|||||||
---
|
|
||||||
name: Enhancements & feature requests
|
|
||||||
about: Suggest an idea for this project
|
|
||||||
title: 'feat: '
|
|
||||||
labels: enhancement
|
|
||||||
assignees: ''
|
|
||||||
---
|
|
||||||
|
|
||||||
> "Direct quote or clear summary of user request or need or user story."
|
|
||||||
|
|
||||||
### Motivation
|
|
||||||
|
|
||||||
Detailed explanation of why this feature is important. Describe the problem it solves or the benefit it provides.
|
|
||||||
|
|
||||||
### Proposed Solution
|
|
||||||
|
|
||||||
Clearly describe the proposed feature, including:
|
|
||||||
|
|
||||||
- High-level overview of the feature
|
|
||||||
- Relevant technologies or integrations
|
|
||||||
- How it fits into the existing workflow or architecture
|
|
||||||
|
|
||||||
### High-Level Workflow
|
|
||||||
|
|
||||||
1. Step-by-step description of how the feature will be implemented
|
|
||||||
2. Include necessary intermediate milestones
|
|
||||||
|
|
||||||
### Key Elements
|
|
||||||
|
|
||||||
- Bullet-point list of technical or UX/UI enhancements
|
|
||||||
- Mention specific integrations or APIs
|
|
||||||
- Highlight changes needed in existing data models or commands
|
|
||||||
|
|
||||||
### Example Workflow
|
|
||||||
|
|
||||||
Provide a clear, concrete example demonstrating the feature:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ task-master [action]
|
|
||||||
→ Expected response/output
|
|
||||||
```
|
|
||||||
|
|
||||||
### Implementation Considerations
|
|
||||||
|
|
||||||
- Dependencies on external components or APIs
|
|
||||||
- Backward compatibility requirements
|
|
||||||
- Potential performance impacts or resource usage
|
|
||||||
|
|
||||||
### Out of Scope (Future Considerations)
|
|
||||||
|
|
||||||
Clearly list any features or improvements not included but relevant for future iterations.
|
|
||||||
31
.github/ISSUE_TEMPLATE/feedback.md
vendored
31
.github/ISSUE_TEMPLATE/feedback.md
vendored
@@ -1,31 +0,0 @@
|
|||||||
---
|
|
||||||
name: Feedback
|
|
||||||
about: Give us specific feedback on the product/approach/tech
|
|
||||||
title: 'feedback: '
|
|
||||||
labels: feedback
|
|
||||||
assignees: ''
|
|
||||||
---
|
|
||||||
|
|
||||||
### Feedback Summary
|
|
||||||
|
|
||||||
Provide a clear summary or direct quote from user feedback.
|
|
||||||
|
|
||||||
### User Context
|
|
||||||
|
|
||||||
Explain the user's context or scenario in which this feedback was provided.
|
|
||||||
|
|
||||||
### User Impact
|
|
||||||
|
|
||||||
Describe how this feedback affects the user experience or workflow.
|
|
||||||
|
|
||||||
### Suggestions
|
|
||||||
|
|
||||||
Provide any initial thoughts, potential solutions, or improvements based on the feedback.
|
|
||||||
|
|
||||||
### Relevant Screenshots or Examples
|
|
||||||
|
|
||||||
Attach screenshots, logs, or examples that illustrate the feedback.
|
|
||||||
|
|
||||||
### Additional Notes
|
|
||||||
|
|
||||||
Any additional context or related information.
|
|
||||||
95
.github/workflows/ci.yml
vendored
95
.github/workflows/ci.yml
vendored
@@ -1,95 +0,0 @@
|
|||||||
name: CI
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
- next
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
- next
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
setup:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- uses: actions/setup-node@v4
|
|
||||||
with:
|
|
||||||
node-version: 20
|
|
||||||
cache: 'npm'
|
|
||||||
|
|
||||||
- name: Install Dependencies
|
|
||||||
id: install
|
|
||||||
run: npm ci
|
|
||||||
timeout-minutes: 2
|
|
||||||
|
|
||||||
- name: Cache node_modules
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: node_modules
|
|
||||||
key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
|
|
||||||
|
|
||||||
format-check:
|
|
||||||
needs: setup
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- uses: actions/setup-node@v4
|
|
||||||
with:
|
|
||||||
node-version: 20
|
|
||||||
|
|
||||||
- name: Restore node_modules
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: node_modules
|
|
||||||
key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
|
|
||||||
|
|
||||||
- name: Format Check
|
|
||||||
run: npm run format-check
|
|
||||||
env:
|
|
||||||
FORCE_COLOR: 1
|
|
||||||
|
|
||||||
test:
|
|
||||||
needs: setup
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- uses: actions/setup-node@v4
|
|
||||||
with:
|
|
||||||
node-version: 20
|
|
||||||
|
|
||||||
- name: Restore node_modules
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: node_modules
|
|
||||||
key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
|
|
||||||
|
|
||||||
- name: Run Tests
|
|
||||||
run: |
|
|
||||||
npm run test:coverage -- --coverageThreshold '{"global":{"branches":0,"functions":0,"lines":0,"statements":0}}' --detectOpenHandles --forceExit
|
|
||||||
env:
|
|
||||||
NODE_ENV: test
|
|
||||||
CI: true
|
|
||||||
FORCE_COLOR: 1
|
|
||||||
timeout-minutes: 10
|
|
||||||
|
|
||||||
- name: Upload Test Results
|
|
||||||
if: always()
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: test-results
|
|
||||||
path: |
|
|
||||||
test-results
|
|
||||||
coverage
|
|
||||||
junit.xml
|
|
||||||
retention-days: 30
|
|
||||||
15 .github/workflows/release.yml
@@ -3,6 +3,7 @@ on:
push:
branches:
- main
+ - next
jobs:
release:
runs-on: ubuntu-latest
@@ -14,21 +15,9 @@ jobs:
- uses: actions/setup-node@v4
with:
node-version: 20
- cache: 'npm'

- - name: Cache node_modules
- uses: actions/cache@v4
- with:
- path: |
- node_modules
- */*/node_modules
- key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
- restore-keys: |
- ${{ runner.os }}-node-

- name: Install Dependencies
- run: npm ci
+ run: npm install
- timeout-minutes: 2

- name: Create Release Pull Request or Publish to npm
uses: changesets/action@v1
|
|||||||
3 .gitignore
@@ -9,9 +9,6 @@ jspm_packages/
.env.test.local
.env.production.local

- # Cursor configuration -- might have ENV variables. Included by default
- # .cursor/mcp.json

# Logs
logs
*.log
|
|||||||
@@ -1,7 +0,0 @@
- # Ignore artifacts:
- build
- coverage
- .changeset
- tasks
- package-lock.json
- tests/fixture/*.json
11 .prettierrc
@@ -1,11 +0,0 @@
- {
- "printWidth": 80,
- "tabWidth": 2,
- "useTabs": true,
- "semi": true,
- "singleQuote": true,
- "trailingComma": "none",
- "bracketSpacing": true,
- "arrowParens": "always",
- "endOfLine": "lf"
- }
3 .vscode/extensions.json
@@ -1,3 +0,0 @@
- {
- "recommendations": ["esbenp.prettier-vscode"]
- }
101
CHANGELOG.md
101
CHANGELOG.md
@@ -1,106 +1,5 @@
|
|||||||
# task-master-ai
|
# task-master-ai
|
||||||
|
|
||||||
## 0.12.0
|
|
||||||
|
|
||||||
### Minor Changes
|
|
||||||
|
|
||||||
- [#253](https://github.com/eyaltoledano/claude-task-master/pull/253) [`b2ccd60`](https://github.com/eyaltoledano/claude-task-master/commit/b2ccd605264e47a61451b4c012030ee29011bb40) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add `npx task-master-ai` that runs mcp instead of using `task-master-mcp``
|
|
||||||
|
|
||||||
- [#267](https://github.com/eyaltoledano/claude-task-master/pull/267) [`c17d912`](https://github.com/eyaltoledano/claude-task-master/commit/c17d912237e6caaa2445e934fc48cd4841abf056) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Improve PRD parsing prompt with structured analysis and clearer task generation guidelines. We are testing a new prompt - please provide feedback on your experience.
|
|
||||||
|
|
||||||
### Patch Changes
|
|
||||||
|
|
||||||
- [#243](https://github.com/eyaltoledano/claude-task-master/pull/243) [`454a1d9`](https://github.com/eyaltoledano/claude-task-master/commit/454a1d9d37439c702656eedc0702c2f7a4451517) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - - Fixes shebang issue not allowing task-master to run on certain windows operating systems
|
|
||||||
|
|
||||||
- Resolves #241 #211 #184 #193
|
|
||||||
|
|
||||||
- [#268](https://github.com/eyaltoledano/claude-task-master/pull/268) [`3e872f8`](https://github.com/eyaltoledano/claude-task-master/commit/3e872f8afbb46cd3978f3852b858c233450b9f33) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix remove-task command to handle multiple comma-separated task IDs
|
|
||||||
|
|
||||||
- [#239](https://github.com/eyaltoledano/claude-task-master/pull/239) [`6599cb0`](https://github.com/eyaltoledano/claude-task-master/commit/6599cb0bf9eccecab528207836e9d45b8536e5c2) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Updates the parameter descriptions for update, update-task and update-subtask to ensure the MCP server correctly reaches for the right update command based on what is being updated -- all tasks, one task, or a subtask.
|
|
||||||
|
|
||||||
- [#272](https://github.com/eyaltoledano/claude-task-master/pull/272) [`3aee9bc`](https://github.com/eyaltoledano/claude-task-master/commit/3aee9bc840eb8f31230bd1b761ed156b261cabc4) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Enhance the `parsePRD` to include `--append` flag. This flag allows users to append the parsed PRD to an existing file, making it easier to manage multiple PRD files without overwriting existing content.
|
|
||||||
|
|
||||||
- [#264](https://github.com/eyaltoledano/claude-task-master/pull/264) [`ff8e75c`](https://github.com/eyaltoledano/claude-task-master/commit/ff8e75cded91fb677903040002626f7a82fd5f88) Thanks [@joedanz](https://github.com/joedanz)! - Add quotes around numeric env vars in mcp.json (Windsurf, etc.)
|
|
||||||
|
|
||||||
- [#248](https://github.com/eyaltoledano/claude-task-master/pull/248) [`d99fa00`](https://github.com/eyaltoledano/claude-task-master/commit/d99fa00980fc61695195949b33dcda7781006f90) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - - Fix `task-master init` polluting codebase with new packages inside `package.json` and modifying project `README`
|
|
||||||
|
|
||||||
- Now only initializes with cursor rules, windsurf rules, mcp.json, scripts/example_prd.txt, .gitignore modifications, and `README-task-master.md`
|
|
||||||
|
|
||||||
- [#266](https://github.com/eyaltoledano/claude-task-master/pull/266) [`41b979c`](https://github.com/eyaltoledano/claude-task-master/commit/41b979c23963483e54331015a86e7c5079f657e4) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fixed a bug that prevented the task-master from running in a Linux container
|
|
||||||
|
|
||||||
- [#265](https://github.com/eyaltoledano/claude-task-master/pull/265) [`0eb16d5`](https://github.com/eyaltoledano/claude-task-master/commit/0eb16d5ecbb8402d1318ca9509e9d4087b27fb25) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Remove the need for project name, description, and version. Since we no longer create a package.json for you
|
|
||||||
|
|
||||||
## 0.11.0
|
|
||||||
|
|
||||||
### Minor Changes
|
|
||||||
|
|
||||||
- [#71](https://github.com/eyaltoledano/claude-task-master/pull/71) [`7141062`](https://github.com/eyaltoledano/claude-task-master/commit/71410629ba187776d92a31ea0729b2ff341b5e38) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - - **Easier Ways to Use Taskmaster (CLI & MCP):**
|
|
||||||
- You can now use Taskmaster either by installing it as a standard command-line tool (`task-master`) or as an MCP server directly within integrated development tools like Cursor (using its built-in features). **This makes Taskmaster accessible regardless of your preferred workflow.**
|
|
||||||
- Setting up a new project is simpler in integrated tools, thanks to the new `initialize_project` capability.
|
|
||||||
- **Complete MCP Implementation:**
|
|
||||||
- NOTE: Many MCP clients charge on a per tool basis. In that regard, the most cost-efficient way to use Taskmaster is through the CLI directly. Otherwise, the MCP offers the smoothest and most recommended user experience.
|
|
||||||
- All MCP tools now follow a standardized output format that mimics RESTful API responses. They are lean JSON responses that are context-efficient. This is a net improvement over the last version, which sent the whole CLI output directly and needlessly wasted tokens.
|
|
||||||
- Added a `remove-task` command to permanently delete tasks you no longer need.
|
|
||||||
- Many new MCP tools are available for managing tasks (updating details, adding/removing subtasks, generating task files, setting status, finding the next task, breaking down complex tasks, handling dependencies, analyzing complexity, etc.), usable both from the command line and integrated tools. **(See the `taskmaster.mdc` reference guide and improved readme for a full list).**
|
|
||||||
- **Better Task Tracking:**
|
|
||||||
- Added a "cancelled" status option for tasks, providing more ways to categorize work.
|
|
||||||
- **Smoother Experience in Integrated Tools:**
|
|
||||||
- Long-running operations (like breaking down tasks or analysis) now run in the background **via an Async Operation Manager** with progress updates, so you know what's happening without waiting and can check status later.
|
|
||||||
- **Improved Documentation:**
|
|
||||||
- Added a comprehensive reference guide (`taskmaster.mdc`) detailing all commands and tools with examples, usage tips, and troubleshooting info. This is mostly for use by the AI but can be useful for human users as well.
|
|
||||||
- Updated the main README with clearer instructions and added a new tutorial/examples guide.
|
|
||||||
- Added documentation listing supported integrated tools (like Cursor).
|
|
||||||
- **Increased Stability & Reliability:**
|
|
||||||
- Using Taskmaster within integrated tools (like Cursor) is now **more stable and the recommended approach.**
|
|
||||||
- Added automated testing (CI) to catch issues earlier, leading to a more reliable tool.
|
|
||||||
- Fixed release process issues to ensure users get the correct package versions when installing or updating via npm.
|
|
||||||
- **Better Command-Line Experience:**
|
|
||||||
- Fixed bugs in the `expand-all` command that could cause **NaN errors or JSON formatting issues (especially when using `--research`).**
|
|
||||||
- Fixed issues with parameter validation in the `analyze-complexity` command (specifically related to the `threshold` parameter).
|
|
||||||
- Made the `add-task` command more consistent by adding standard flags like `--title`, `--description` for manual task creation so you don't have to use `--prompt` and can quickly drop new ideas and stay in your flow.
|
|
||||||
- Improved error messages for incorrect commands or flags, making them easier to understand.
|
|
||||||
- Added confirmation warnings before permanently deleting tasks (`remove-task`) to prevent mistakes. There's a known bug for deleting multiple tasks with comma-separated values. It'll be fixed next release.
|
|
||||||
- Renamed some background tool names used by integrated tools (e.g., `list-tasks` is now `get_tasks`) to be more intuitive if seen in logs or AI interactions.
|
|
||||||
- Smoother project start: **Improved the guidance provided to AI assistants immediately after setup** (related to `init` and `parse-prd` steps). This ensures the AI doesn't go on a tangent deciding its own workflow, and follows the exact process outlined in the Taskmaster workflow.
|
|
||||||
- **Clearer Error Messages:**
|
|
||||||
- When generating subtasks fails, error messages are now clearer, **including specific task IDs and potential suggestions.**
|
|
||||||
- AI fallback from Claude to Perplexity now also works the other way around. If Perplexity is down, will switch to Claude.
|
|
||||||
- **Simplified Setup & Configuration:**
|
|
||||||
- Made it clearer how to configure API keys depending on whether you're using the command-line tool (`.env` file) or an integrated tool (`.cursor/mcp.json` file).
|
|
||||||
- Taskmaster is now better at automatically finding your project files, especially in integrated tools, reducing the need for manual path settings.
|
|
||||||
- Fixed an issue that could prevent Taskmaster from working correctly immediately after initialization in integrated tools (related to how the MCP server was invoked). This should solve the issue most users were experiencing with the last release (0.10.x)
|
|
||||||
- Updated setup templates with clearer examples for API keys.
|
|
||||||
- \*\*For advanced users setting up the MCP server manually, the command is now `npx -y task-master-ai task-master-mcp`.
|
|
||||||
- **Enhanced Performance & AI:**
|
|
||||||
- Updated underlying AI model settings:
|
|
||||||
- **Increased Context Window:** Can now handle larger projects/tasks due to an increased Claude context window (64k -> 128k tokens).
|
|
||||||
- **Reduced AI randomness:** More consistent and predictable AI outputs (temperature 0.4 -> 0.2).
|
|
||||||
- **Updated default AI models:** Uses newer models like `claude-3-7-sonnet-20250219` and Perplexity `sonar-pro` by default.
|
|
||||||
- **More granular breakdown:** Increased the default number of subtasks generated by `expand` to 5 (from 4).
|
|
||||||
- **Consistent defaults:** Set the default priority for new tasks consistently to "medium".
|
|
||||||
- Improved performance when viewing task details in integrated tools by sending less redundant data.
|
|
||||||
- **Documentation Clarity:**
|
|
||||||
- Clarified in documentation that Markdown files (`.md`) can be used for Product Requirements Documents (`parse_prd`).
|
|
||||||
- Improved the description for the `numTasks` option in `parse_prd` for better guidance.
|
|
||||||
- **Improved Visuals (CLI):**
|
|
||||||
- Enhanced the look and feel of progress bars and status updates in the command line.
|
|
||||||
- Added a helpful color-coded progress bar to the task details view (`show` command) to visualize subtask completion.
|
|
||||||
- Made progress bars show a breakdown of task statuses (e.g., how many are pending vs. done).
|
|
||||||
- Made status counts clearer with text labels next to icons.
|
|
||||||
- Prevented progress bars from messing up the display on smaller terminal windows.
|
|
||||||
- Adjusted how progress is calculated for 'deferred' and 'cancelled' tasks in the progress bar, while still showing their distinct status visually.
|
|
||||||
- **Fixes for Integrated Tools:**
|
|
||||||
- Fixed how progress updates are sent to integrated tools, ensuring they display correctly.
|
|
||||||
- Fixed internal issues that could cause errors or invalid JSON responses when using Taskmaster with integrated tools.
|
|
||||||
|
|
||||||
## 0.10.1

### Patch Changes

- [#80](https://github.com/eyaltoledano/claude-task-master/pull/80) [`aa185b2`](https://github.com/eyaltoledano/claude-task-master/commit/aa185b28b248b4ca93f9195b502e2f5187868eaa) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Remove non-existent package `@model-context-protocol/sdk`

- [#45](https://github.com/eyaltoledano/claude-task-master/pull/45) [`757fd47`](https://github.com/eyaltoledano/claude-task-master/commit/757fd478d2e2eff8506ae746c3470c6088f4d944) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add license to repo

## 0.10.0

### Minor Changes

90
LICENSE.md
Normal file
@@ -0,0 +1,90 @@
# Dual License

This project is licensed under two separate licenses:

1. [Business Source License 1.1](#business-source-license-11) (BSL 1.1) for commercial use of Task Master itself
2. [Apache License 2.0](#apache-license-20) for all other uses

## Business Source License 1.1

Terms: https://mariadb.com/bsl11/

Licensed Work: Task Master AI
Additional Use Grant: You may use Task Master AI to create and commercialize your own projects and products.

Change Date: 2025-03-30
Change License: None

The Licensed Work is subject to the Business Source License 1.1. If you are interested in using the Licensed Work in a way that competes directly with Task Master, please contact the licensors.

### Licensor

- Eyal Toledano (GitHub: @eyaltoledano)
- Ralph (GitHub: @Crunchyman-ralph)

### Commercial Use Restrictions

This license explicitly restricts certain commercial uses of Task Master AI to the Licensors listed above. Restricted commercial uses include:

1. Creating commercial products or services that directly compete with Task Master AI
2. Selling Task Master AI itself as a service
3. Offering Task Master AI's functionality as a commercial managed service
4. Reselling or redistributing Task Master AI for a fee

### Explicitly Permitted Uses

The following uses are explicitly allowed under this license:

1. Using Task Master AI to create and commercialize your own projects
2. Using Task Master AI in commercial environments for internal development
3. Building and selling products or services that were created using Task Master AI
4. Using Task Master AI for commercial development as long as you're not selling Task Master AI itself

### Additional Terms

1. The right to commercialize Task Master AI itself is exclusively reserved for the Licensors
2. No party may create commercial products that directly compete with Task Master AI without explicit written permission
3. Forks of this repository are subject to the same restrictions regarding direct competition
4. Contributors agree that their contributions will be subject to this same dual licensing structure

## Apache License 2.0

For all uses other than those restricted above. See [APACHE-LICENSE](./APACHE-LICENSE) for the full license text.

### Permitted Use Definition

You may use Task Master AI for any purpose, including commercial purposes, as long as you are not:

1. Creating a direct competitor to Task Master AI
2. Selling Task Master AI itself as a service
3. Redistributing Task Master AI for a fee

### Requirements for Use

1. You must include appropriate copyright notices
2. You must state significant changes made to the software
3. You must preserve all license notices

## Questions and Commercial Licensing

For questions about licensing or to inquire about commercial use that may compete with Task Master, please contact:

- Eyal Toledano (GitHub: @eyaltoledano)
- Ralph (GitHub: @Crunchyman-ralph)

## Examples

### ✅ Allowed Uses

- Using Task Master to create a commercial SaaS product
- Using Task Master in your company for development
- Creating and selling products that were built using Task Master
- Using Task Master to generate code for commercial projects
- Offering consulting services where you use Task Master

### ❌ Restricted Uses

- Creating a competing AI task management tool
- Selling access to Task Master as a service
- Creating a hosted version of Task Master
- Reselling Task Master's functionality
@@ -58,7 +58,6 @@ This will prompt you for project details and set up a new project with the necessary files and structure.

### Important Notes

1. **ES Modules Configuration:**

   - This project uses ES Modules (ESM) instead of CommonJS.
   - This is set via `"type": "module"` in your package.json.
   - Use `import/export` syntax instead of `require()`.

@@ -146,7 +145,7 @@ To enable enhanced task management capabilities directly within Cursor using the Model Control Protocol (MCP):

4. Configure with the following details:

   - Name: "Task Master"
   - Type: "Command"
   - Command: "npx -y task-master-ai"
   - Command: "npx -y --package task-master-ai task-master-mcp"

5. Save the settings

Once configured, you can interact with Task Master's task management commands directly through Cursor's interface, providing a more integrated experience.
693
README.md
@@ -1,68 +1,58 @@
|
|||||||
# Task Master [](https://github.com/eyaltoledano/claude-task-master/stargazers)
|
# Task Master
|
||||||
|
|
||||||
[](https://github.com/eyaltoledano/claude-task-master/actions/workflows/ci.yml) [](https://badge.fury.io/js/task-master-ai)  [](LICENSE)
|
### by [@eyaltoledano](https://x.com/eyaltoledano)
|
||||||
|
|
||||||
### By [@eyaltoledano](https://x.com/eyaltoledano) & [@RalphEcom](https://x.com/RalphEcom)
|
|
||||||
|
|
||||||
[](https://x.com/eyaltoledano)
|
|
||||||
[](https://x.com/RalphEcom)
|
|
||||||
|
|
||||||
A task management system for AI-driven development with Claude, designed to work seamlessly with Cursor AI.
|
A task management system for AI-driven development with Claude, designed to work seamlessly with Cursor AI.
|
||||||
|
|
||||||
|
## Licensing
|
||||||
|
|
||||||
|
Task Master is licensed under the MIT License with Commons Clause. This means you can:
|
||||||
|
|
||||||
|
✅ **Allowed**:
|
||||||
|
|
||||||
|
- Use Task Master for any purpose (personal, commercial, academic)
|
||||||
|
- Modify the code
|
||||||
|
- Distribute copies
|
||||||
|
- Create and sell products built using Task Master
|
||||||
|
|
||||||
|
❌ **Not Allowed**:
|
||||||
|
|
||||||
|
- Sell Task Master itself
|
||||||
|
- Offer Task Master as a hosted service
|
||||||
|
- Create competing products based on Task Master
|
||||||
|
|
||||||
|
See the [LICENSE](LICENSE) file for the complete license text.
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
|
- Node.js 14.0.0 or higher
|
||||||
- Anthropic API key (Claude API)
|
- Anthropic API key (Claude API)
|
||||||
|
- Anthropic SDK version 0.39.0 or higher
|
||||||
- OpenAI SDK (for Perplexity API integration, optional)
|
- OpenAI SDK (for Perplexity API integration, optional)
|
||||||
|
|
||||||
## Configuration

The script can be configured through environment variables in a `.env` file at the root of the project:

### Required Configuration

- `ANTHROPIC_API_KEY`: Your Anthropic API key for Claude

### Optional Configuration

- `MODEL`: Specify which Claude model to use (default: "claude-3-7-sonnet-20250219")
- `MAX_TOKENS`: Maximum tokens for model responses (default: 4000)
- `TEMPERATURE`: Temperature for model responses (default: 0.7)
- `PERPLEXITY_API_KEY`: Your Perplexity API key for research-backed subtask generation
- `PERPLEXITY_MODEL`: Specify which Perplexity model to use (default: "sonar-medium-online")
- `DEBUG`: Enable debug logging (default: false)
- `LOG_LEVEL`: Log level - debug, info, warn, error (default: info)
- `DEFAULT_SUBTASKS`: Default number of subtasks when expanding (default: 3)
- `DEFAULT_PRIORITY`: Default priority for generated tasks (default: medium)
- `PROJECT_NAME`: Override default project name in tasks.json
- `PROJECT_VERSION`: Override default version in tasks.json

## Installation

## Quick Start

### Option 1 | MCP (Recommended):

MCP (Model Control Protocol) provides the easiest way to get started with Task Master directly in your editor.

1. **Add the MCP config to your editor** (Cursor recommended, but it works with other text editors):

```json
{
  "mcpServers": {
    "taskmaster-ai": {
      "command": "npx",
      "args": ["-y", "task-master-ai"],
      "env": {
        "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
        "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
        "MODEL": "claude-3-7-sonnet-20250219",
        "PERPLEXITY_MODEL": "sonar-pro",
        "MAX_TOKENS": "64000",
        "TEMPERATURE": "0.2",
        "DEFAULT_SUBTASKS": "5",
        "DEFAULT_PRIORITY": "medium"
      }
    }
  }
}
```

2. **Enable the MCP** in your editor

3. **Prompt the AI** to initialize Task Master:

```
Can you please initialize taskmaster-ai into my project?
```

4. **Use common commands** directly through your AI assistant:

```txt
Can you parse my PRD at scripts/prd.txt?
What's the next task I should work on?
Can you help me implement task 3?
Can you help me expand task 4?
```
### Option 2: Using Command Line
|
|
||||||
|
|
||||||
#### Installation
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Install globally
|
# Install globally
|
||||||
@@ -72,7 +62,7 @@ npm install -g task-master-ai
|
|||||||
npm install task-master-ai
|
npm install task-master-ai
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Initialize a new project
|
### Initialize a new project
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# If installed globally
|
# If installed globally
|
||||||
@@ -84,7 +74,14 @@ npx task-master-init
|
|||||||
|
|
||||||
This will prompt you for project details and set up a new project with the necessary files and structure.
|
This will prompt you for project details and set up a new project with the necessary files and structure.
|
||||||
|
|
||||||
#### Common Commands
|
### Important Notes
|
||||||
|
|
||||||
|
1. This package uses ES modules. Your package.json should include `"type": "module"`.
|
||||||
|
2. The Anthropic SDK version should be 0.39.0 or higher.
|
||||||
|
|
||||||
|
## Quick Start with Global Commands
|
||||||
|
|
||||||
|
After installing the package globally, you can use these CLI commands from any directory:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Initialize a new project
|
# Initialize a new project
|
||||||
@@ -103,16 +100,6 @@ task-master next
|
|||||||
task-master generate
|
task-master generate
|
||||||
```
|
```
|
||||||
|
|
||||||
## Documentation
|
|
||||||
|
|
||||||
For more detailed information, check out the documentation in the `docs` directory:
|
|
||||||
|
|
||||||
- [Configuration Guide](docs/configuration.md) - Set up environment variables and customize Task Master
|
|
||||||
- [Tutorial](docs/tutorial.md) - Step-by-step guide to getting started with Task Master
|
|
||||||
- [Command Reference](docs/command-reference.md) - Complete list of all available commands
|
|
||||||
- [Task Structure](docs/task-structure.md) - Understanding the task format and features
|
|
||||||
- [Example Interactions](docs/examples.md) - Common Cursor AI interaction examples
|
|
||||||
|
|
||||||
## Troubleshooting
|
## Troubleshooting
|
||||||
|
|
||||||
### If `task-master init` doesn't respond:
|
### If `task-master init` doesn't respond:
|
||||||
@@ -131,31 +118,577 @@ cd claude-task-master
|
|||||||
node scripts/init.js
|
node scripts/init.js
|
||||||
```
|
```
|
||||||
|
|
||||||
## Contributors
|
## Task Structure
|
||||||
|
|
||||||
<a href="https://github.com/eyaltoledano/claude-task-master/graphs/contributors">
|
Tasks in tasks.json have the following structure:
|
||||||
<img src="https://contrib.rocks/image?repo=eyaltoledano/claude-task-master" alt="Task Master project contributors" />
|
|
||||||
</a>
|
|
||||||
|
|
||||||
## Star History
|
- `id`: Unique identifier for the task (Example: `1`)
|
||||||
|
- `title`: Brief, descriptive title of the task (Example: `"Initialize Repo"`)
|
||||||
|
- `description`: Concise description of what the task involves (Example: `"Create a new repository, set up initial structure."`)
|
||||||
|
- `status`: Current state of the task (Example: `"pending"`, `"done"`, `"deferred"`)
|
||||||
|
- `dependencies`: IDs of tasks that must be completed before this task (Example: `[1, 2]`)
|
||||||
|
- Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending)
|
||||||
|
- This helps quickly identify which prerequisite tasks are blocking work
|
||||||
|
- `priority`: Importance level of the task (Example: `"high"`, `"medium"`, `"low"`)
|
||||||
|
- `details`: In-depth implementation instructions (Example: `"Use GitHub client ID/secret, handle callback, set session token."`)
|
||||||
|
- `testStrategy`: Verification approach (Example: `"Deploy and call endpoint to confirm 'Hello World' response."`)
|
||||||
|
- `subtasks`: List of smaller, more specific tasks that make up the main task (Example: `[{"id": 1, "title": "Configure OAuth", ...}]`)
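
Putting those fields together, a single illustrative entry in `tasks.json` (values borrowed from the examples above) might look like:

```json
{
  "id": 1,
  "title": "Initialize Repo",
  "description": "Create a new repository, set up initial structure.",
  "status": "pending",
  "dependencies": [],
  "priority": "high",
  "details": "Use GitHub client ID/secret, handle callback, set session token.",
  "testStrategy": "Deploy and call endpoint to confirm 'Hello World' response.",
  "subtasks": []
}
```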
|
||||||
|
|
||||||
[](https://www.star-history.com/#eyaltoledano/claude-task-master&Timeline)
|
## Integrating with Cursor AI
|
||||||
|
|
||||||
## Licensing
|
Claude Task Master is designed to work seamlessly with [Cursor AI](https://www.cursor.so/), providing a structured workflow for AI-driven development.
|
||||||
|
|
||||||
Task Master is licensed under the MIT License with Commons Clause. This means you can:
|
### Setup with Cursor
|
||||||
|
|
||||||
✅ **Allowed**:
|
1. After initializing your project, open it in Cursor
|
||||||
|
2. The `.cursor/rules/dev_workflow.mdc` file is automatically loaded by Cursor, providing the AI with knowledge about the task management system
|
||||||
|
3. Place your PRD document in the `scripts/` directory (e.g., `scripts/prd.txt`)
|
||||||
|
4. Open Cursor's AI chat and switch to Agent mode
|
||||||
|
|
||||||
- Use Task Master for any purpose (personal, commercial, academic)
|
### Setting up MCP in Cursor
|
||||||
- Modify the code
|
|
||||||
- Distribute copies
|
|
||||||
- Create and sell products built using Task Master
|
|
||||||
|
|
||||||
❌ **Not Allowed**:
|
To enable enhanced task management capabilities directly within Cursor using the Model Control Protocol (MCP):
|
||||||
|
|
||||||
- Sell Task Master itself
|
1. Go to Cursor settings
|
||||||
- Offer Task Master as a hosted service
|
2. Navigate to the MCP section
|
||||||
- Create competing products based on Task Master
|
3. Click on "Add New MCP Server"
|
||||||
|
4. Configure with the following details:
|
||||||
|
- Name: "Task Master"
|
||||||
|
- Type: "Command"
|
||||||
|
- Command: "npx -y --package task-master-ai task-master-mcp"
|
||||||
|
5. Save the settings
|
||||||
|
|
||||||
See the [LICENSE](LICENSE) file for the complete license text and [licensing details](docs/licensing.md) for more information.
|
Once configured, you can interact with Task Master's task management commands directly through Cursor's interface, providing a more integrated experience.
|
||||||
|
|
||||||
|
### Initial Task Generation
|
||||||
|
|
||||||
|
In Cursor's AI chat, instruct the agent to generate tasks from your PRD:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please use the task-master parse-prd command to generate tasks from my PRD. The PRD is located at scripts/prd.txt.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master parse-prd scripts/prd.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
This will:
|
||||||
|
|
||||||
|
- Parse your PRD document
|
||||||
|
- Generate a structured `tasks.json` file with tasks, dependencies, priorities, and test strategies
|
||||||
|
- The agent will understand this process due to the Cursor rules
|
||||||
|
|
||||||
|
### Generate Individual Task Files
|
||||||
|
|
||||||
|
Next, ask the agent to generate individual task files:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please generate individual task files from tasks.json
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master generate
|
||||||
|
```
|
||||||
|
|
||||||
|
This creates individual task files in the `tasks/` directory (e.g., `task_001.txt`, `task_002.txt`), making it easier to reference specific tasks.
|
||||||
|
|
||||||
|
## AI-Driven Development Workflow
|
||||||
|
|
||||||
|
The Cursor agent is pre-configured (via the rules file) to follow this workflow:
|
||||||
|
|
||||||
|
### 1. Task Discovery and Selection
|
||||||
|
|
||||||
|
Ask the agent to list available tasks:
|
||||||
|
|
||||||
|
```
|
||||||
|
What tasks are available to work on next?
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will:
|
||||||
|
|
||||||
|
- Run `task-master list` to see all tasks
|
||||||
|
- Run `task-master next` to determine the next task to work on
|
||||||
|
- Analyze dependencies to determine which tasks are ready to be worked on
|
||||||
|
- Prioritize tasks based on priority level and ID order
|
||||||
|
- Suggest the next task(s) to implement
|
||||||
|
|
||||||
|
### 2. Task Implementation
|
||||||
|
|
||||||
|
When implementing a task, the agent will:
|
||||||
|
|
||||||
|
- Reference the task's details section for implementation specifics
|
||||||
|
- Consider dependencies on previous tasks
|
||||||
|
- Follow the project's coding standards
|
||||||
|
- Create appropriate tests based on the task's testStrategy
|
||||||
|
|
||||||
|
You can ask:
|
||||||
|
|
||||||
|
```
|
||||||
|
Let's implement task 3. What does it involve?
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Task Verification
|
||||||
|
|
||||||
|
Before marking a task as complete, verify it according to:
|
||||||
|
|
||||||
|
- The task's specified testStrategy
|
||||||
|
- Any automated tests in the codebase
|
||||||
|
- Manual verification if required
|
||||||
|
|
||||||
|
### 4. Task Completion
|
||||||
|
|
||||||
|
When a task is completed, tell the agent:
|
||||||
|
|
||||||
|
```
|
||||||
|
Task 3 is now complete. Please update its status.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master set-status --id=3 --status=done
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Handling Implementation Drift
|
||||||
|
|
||||||
|
If during implementation, you discover that:
|
||||||
|
|
||||||
|
- The current approach differs significantly from what was planned
|
||||||
|
- Future tasks need to be modified due to current implementation choices
|
||||||
|
- New dependencies or requirements have emerged
|
||||||
|
|
||||||
|
Tell the agent:
|
||||||
|
|
||||||
|
```
|
||||||
|
We've changed our approach. We're now using Express instead of Fastify. Please update all future tasks to reflect this change.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master update --from=4 --prompt="Now we are using Express instead of Fastify."
|
||||||
|
```
|
||||||
|
|
||||||
|
This will rewrite or re-scope subsequent tasks in tasks.json while preserving completed work.
|
||||||
|
|
||||||
|
### 6. Breaking Down Complex Tasks
|
||||||
|
|
||||||
|
For complex tasks that need more granularity:
|
||||||
|
|
||||||
|
```
|
||||||
|
Task 5 seems complex. Can you break it down into subtasks?
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master expand --id=5 --num=3
|
||||||
|
```
|
||||||
|
|
||||||
|
You can provide additional context:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please break down task 5 with a focus on security considerations.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master expand --id=5 --prompt="Focus on security aspects"
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also expand all pending tasks:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please break down all pending tasks into subtasks.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master expand --all
|
||||||
|
```
|
||||||
|
|
||||||
|
For research-backed subtask generation using Perplexity AI:
|
||||||
|
|
||||||
|
```
|
||||||
|
Please break down task 5 using research-backed generation.
|
||||||
|
```
|
||||||
|
|
||||||
|
The agent will execute:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
task-master expand --id=5 --research
|
||||||
|
```
|
||||||
|
|
||||||
|
## Command Reference
|
||||||
|
|
||||||
|
Here's a comprehensive reference of all available commands:
|
||||||
|
|
||||||
|
### Parse PRD
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Parse a PRD file and generate tasks
|
||||||
|
task-master parse-prd <prd-file.txt>
|
||||||
|
|
||||||
|
# Limit the number of tasks generated
|
||||||
|
task-master parse-prd <prd-file.txt> --num-tasks=10
|
||||||
|
```
|
||||||
|
|
||||||
|
### List Tasks
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List all tasks
|
||||||
|
task-master list
|
||||||
|
|
||||||
|
# List tasks with a specific status
|
||||||
|
task-master list --status=<status>
|
||||||
|
|
||||||
|
# List tasks with subtasks
|
||||||
|
task-master list --with-subtasks
|
||||||
|
|
||||||
|
# List tasks with a specific status and include subtasks
|
||||||
|
task-master list --status=<status> --with-subtasks
|
||||||
|
```
|
||||||
|
|
||||||
|
### Show Next Task
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Show the next task to work on based on dependencies and status
|
||||||
|
task-master next
|
||||||
|
```
|
||||||
|
|
||||||
|
### Show Specific Task
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Show details of a specific task
|
||||||
|
task-master show <id>
|
||||||
|
# or
|
||||||
|
task-master show --id=<id>
|
||||||
|
|
||||||
|
# View a specific subtask (e.g., subtask 2 of task 1)
|
||||||
|
task-master show 1.2
|
||||||
|
```
|
||||||
|
|
||||||
|
### Update Tasks
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Update tasks from a specific ID and provide context
|
||||||
|
task-master update --from=<id> --prompt="<prompt>"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Update a Specific Task
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Update a single task by ID with new information
|
||||||
|
task-master update-task --id=<id> --prompt="<prompt>"
|
||||||
|
|
||||||
|
# Use research-backed updates with Perplexity AI
|
||||||
|
task-master update-task --id=<id> --prompt="<prompt>" --research
|
||||||
|
```
|
||||||
|
|
||||||
|
### Update a Subtask
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Append additional information to a specific subtask
|
||||||
|
task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>"
|
||||||
|
|
||||||
|
# Example: Add details about API rate limiting to subtask 2 of task 5
|
||||||
|
task-master update-subtask --id=5.2 --prompt="Add rate limiting of 100 requests per minute"
|
||||||
|
|
||||||
|
# Use research-backed updates with Perplexity AI
|
||||||
|
task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>" --research
|
||||||
|
```
|
||||||
|
|
||||||
|
Unlike the `update-task` command which replaces task information, the `update-subtask` command _appends_ new information to the existing subtask details, marking it with a timestamp. This is useful for iteratively enhancing subtasks while preserving the original content.
|
||||||
|
|
||||||
|
### Remove Task
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Remove a task permanently
|
||||||
|
task-master remove-task --id=<id>
|
||||||
|
|
||||||
|
# Remove a subtask permanently
|
||||||
|
task-master remove-task --id=<parentId.subtaskId>
|
||||||
|
|
||||||
|
# Skip the confirmation prompt
|
||||||
|
task-master remove-task --id=<id> --yes
|
||||||
|
```
|
||||||
|
|
||||||
|
The `remove-task` command permanently deletes a task or subtask from `tasks.json`. It also automatically cleans up any references to the deleted task in other tasks' dependencies. Consider using 'blocked', 'cancelled', or 'deferred' status instead if you want to keep the task for reference.
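
For instance, with illustrative task data, removing task 2 also strips it from any other task's `dependencies` array:

```json
// Before `task-master remove-task --id=2`
{ "id": 3, "title": "Example task", "dependencies": [1, 2], "status": "pending" }

// After: the reference to the deleted task is removed automatically
{ "id": 3, "title": "Example task", "dependencies": [1], "status": "pending" }
```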
|
||||||
|
|
||||||
|
### Generate Task Files
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate individual task files from tasks.json
|
||||||
|
task-master generate
|
||||||
|
```
|
||||||
|
|
||||||
|
### Set Task Status
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Set status of a single task
|
||||||
|
task-master set-status --id=<id> --status=<status>
|
||||||
|
|
||||||
|
# Set status for multiple tasks
|
||||||
|
task-master set-status --id=1,2,3 --status=<status>
|
||||||
|
|
||||||
|
# Set status for subtasks
|
||||||
|
task-master set-status --id=1.1,1.2 --status=<status>
|
||||||
|
```
|
||||||
|
|
||||||
|
When marking a task as "done", all of its subtasks will automatically be marked as "done" as well.
|
||||||
|
|
||||||
|
### Expand Tasks
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Expand a specific task with subtasks
|
||||||
|
task-master expand --id=<id> --num=<number>
|
||||||
|
|
||||||
|
# Expand with additional context
|
||||||
|
task-master expand --id=<id> --prompt="<context>"
|
||||||
|
|
||||||
|
# Expand all pending tasks
|
||||||
|
task-master expand --all
|
||||||
|
|
||||||
|
# Force regeneration of subtasks for tasks that already have them
|
||||||
|
task-master expand --all --force
|
||||||
|
|
||||||
|
# Research-backed subtask generation for a specific task
|
||||||
|
task-master expand --id=<id> --research
|
||||||
|
|
||||||
|
# Research-backed generation for all tasks
|
||||||
|
task-master expand --all --research
|
||||||
|
```
|
||||||
|
|
||||||
|
### Clear Subtasks
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clear subtasks from a specific task
|
||||||
|
task-master clear-subtasks --id=<id>
|
||||||
|
|
||||||
|
# Clear subtasks from multiple tasks
|
||||||
|
task-master clear-subtasks --id=1,2,3
|
||||||
|
|
||||||
|
# Clear subtasks from all tasks
|
||||||
|
task-master clear-subtasks --all
|
||||||
|
```
|
||||||
|
|
||||||
|
### Analyze Task Complexity
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Analyze complexity of all tasks
|
||||||
|
task-master analyze-complexity
|
||||||
|
|
||||||
|
# Save report to a custom location
|
||||||
|
task-master analyze-complexity --output=my-report.json
|
||||||
|
|
||||||
|
# Use a specific LLM model
|
||||||
|
task-master analyze-complexity --model=claude-3-opus-20240229
|
||||||
|
|
||||||
|
# Set a custom complexity threshold (1-10)
|
||||||
|
task-master analyze-complexity --threshold=6
|
||||||
|
|
||||||
|
# Use an alternative tasks file
|
||||||
|
task-master analyze-complexity --file=custom-tasks.json
|
||||||
|
|
||||||
|
# Use Perplexity AI for research-backed complexity analysis
|
||||||
|
task-master analyze-complexity --research
|
||||||
|
```
|
||||||
|
|
||||||
|
### View Complexity Report
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Display the task complexity analysis report
|
||||||
|
task-master complexity-report
|
||||||
|
|
||||||
|
# View a report at a custom location
|
||||||
|
task-master complexity-report --file=my-report.json
|
||||||
|
```
|
||||||
|
|
||||||
|
### Managing Task Dependencies
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Add a dependency to a task
|
||||||
|
task-master add-dependency --id=<id> --depends-on=<id>
|
||||||
|
|
||||||
|
# Remove a dependency from a task
|
||||||
|
task-master remove-dependency --id=<id> --depends-on=<id>
|
||||||
|
|
||||||
|
# Validate dependencies without fixing them
|
||||||
|
task-master validate-dependencies
|
||||||
|
|
||||||
|
# Find and fix invalid dependencies automatically
|
||||||
|
task-master fix-dependencies
|
||||||
|
```
|
||||||
|
|
||||||
|
### Add a New Task
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Add a new task using AI
|
||||||
|
task-master add-task --prompt="Description of the new task"
|
||||||
|
|
||||||
|
# Add a task with dependencies
|
||||||
|
task-master add-task --prompt="Description" --dependencies=1,2,3
|
||||||
|
|
||||||
|
# Add a task with priority
|
||||||
|
task-master add-task --prompt="Description" --priority=high
|
||||||
|
```
|
||||||
|
|
||||||
|
## Feature Details
|
||||||
|
|
||||||
|
### Analyzing Task Complexity
|
||||||
|
|
||||||
|
The `analyze-complexity` command:
|
||||||
|
|
||||||
|
- Analyzes each task using AI to assess its complexity on a scale of 1-10
|
||||||
|
- Recommends optimal number of subtasks based on configured DEFAULT_SUBTASKS
|
||||||
|
- Generates tailored prompts for expanding each task
|
||||||
|
- Creates a comprehensive JSON report with ready-to-use commands
|
||||||
|
- Saves the report to scripts/task-complexity-report.json by default
|
||||||
|
|
||||||
|
The generated report contains:
|
||||||
|
|
||||||
|
- Complexity analysis for each task (scored 1-10)
|
||||||
|
- Recommended number of subtasks based on complexity
|
||||||
|
- AI-generated expansion prompts customized for each task
|
||||||
|
- Ready-to-run expansion commands directly within each task analysis
|
||||||
|
|
||||||
|
### Viewing Complexity Report
|
||||||
|
|
||||||
|
The `complexity-report` command:
|
||||||
|
|
||||||
|
- Displays a formatted, easy-to-read version of the complexity analysis report
|
||||||
|
- Shows tasks organized by complexity score (highest to lowest)
|
||||||
|
- Provides complexity distribution statistics (low, medium, high)
|
||||||
|
- Highlights tasks recommended for expansion based on threshold score
|
||||||
|
- Includes ready-to-use expansion commands for each complex task
|
||||||
|
- If no report exists, offers to generate one on the spot
|
||||||
|
|
||||||
|
### Smart Task Expansion
|
||||||
|
|
||||||
|
The `expand` command automatically checks for and uses the complexity report:
|
||||||
|
|
||||||
|
When a complexity report exists:
|
||||||
|
|
||||||
|
- Tasks are automatically expanded using the recommended subtask count and prompts
|
||||||
|
- When expanding all tasks, they're processed in order of complexity (highest first)
|
||||||
|
- Research-backed generation is preserved from the complexity analysis
|
||||||
|
- You can still override recommendations with explicit command-line options
|
||||||
|
|
||||||
|
Example workflow:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate the complexity analysis report with research capabilities
|
||||||
|
task-master analyze-complexity --research
|
||||||
|
|
||||||
|
# Review the report in a readable format
|
||||||
|
task-master complexity-report
|
||||||
|
|
||||||
|
# Expand tasks using the optimized recommendations
|
||||||
|
task-master expand --id=8
|
||||||
|
# or expand all tasks
|
||||||
|
task-master expand --all
|
||||||
|
```
|
||||||
|
|
||||||
|
### Finding the Next Task
|
||||||
|
|
||||||
|
The `next` command:
|
||||||
|
|
||||||
|
- Identifies tasks that are pending/in-progress and have all dependencies satisfied
|
||||||
|
- Prioritizes tasks by priority level, dependency count, and task ID
|
||||||
|
- Displays comprehensive information about the selected task:
|
||||||
|
- Basic task details (ID, title, priority, dependencies)
|
||||||
|
- Implementation details
|
||||||
|
- Subtasks (if they exist)
|
||||||
|
- Provides contextual suggested actions:
|
||||||
|
- Command to mark the task as in-progress
|
||||||
|
- Command to mark the task as done
|
||||||
|
- Commands for working with subtasks
|
||||||
|
|
||||||
|
### Viewing Specific Task Details
|
||||||
|
|
||||||
|
The `show` command:
|
||||||
|
|
||||||
|
- Displays comprehensive details about a specific task or subtask
|
||||||
|
- Shows task status, priority, dependencies, and detailed implementation notes
|
||||||
|
- For parent tasks, displays all subtasks and their status
|
||||||
|
- For subtasks, shows parent task relationship
|
||||||
|
- Provides contextual action suggestions based on the task's state
|
||||||
|
- Works with both regular tasks and subtasks (using the format taskId.subtaskId)
|
||||||
|
|
||||||
|
## Best Practices for AI-Driven Development
|
||||||
|
|
||||||
|
1. **Start with a detailed PRD**: The more detailed your PRD, the better the generated tasks will be.
|
||||||
|
|
||||||
|
2. **Review generated tasks**: After parsing the PRD, review the tasks to ensure they make sense and have appropriate dependencies.
|
||||||
|
|
||||||
|
3. **Analyze task complexity**: Use the complexity analysis feature to identify which tasks should be broken down further.
|
||||||
|
|
||||||
|
4. **Follow the dependency chain**: Always respect task dependencies - the Cursor agent will help with this.
|
||||||
|
|
||||||
|
5. **Update as you go**: If your implementation diverges from the plan, use the update command to keep future tasks aligned with your current approach.
|
||||||
|
|
||||||
|
6. **Break down complex tasks**: Use the expand command to break down complex tasks into manageable subtasks.
|
||||||
|
|
||||||
|
7. **Regenerate task files**: After any updates to tasks.json, regenerate the task files to keep them in sync.
|
||||||
|
|
||||||
|
8. **Communicate context to the agent**: When asking the Cursor agent to help with a task, provide context about what you're trying to achieve.
|
||||||
|
|
||||||
|
9. **Validate dependencies**: Periodically run the validate-dependencies command to check for invalid or circular dependencies.
|
||||||
|
|
||||||
|
## Example Cursor AI Interactions
|
||||||
|
|
||||||
|
### Starting a new project
|
||||||
|
|
||||||
|
```
|
||||||
|
I've just initialized a new project with Claude Task Master. I have a PRD at scripts/prd.txt.
|
||||||
|
Can you help me parse it and set up the initial tasks?
|
||||||
|
```
|
||||||
|
|
||||||
|
### Working on tasks
|
||||||
|
|
||||||
|
```
|
||||||
|
What's the next task I should work on? Please consider dependencies and priorities.
|
||||||
|
```
|
||||||
|
|
||||||
|
### Implementing a specific task
|
||||||
|
|
||||||
|
```
|
||||||
|
I'd like to implement task 4. Can you help me understand what needs to be done and how to approach it?
|
||||||
|
```
|
||||||
|
|
||||||
|
### Managing subtasks
|
||||||
|
|
||||||
|
```
|
||||||
|
I need to regenerate the subtasks for task 3 with a different approach. Can you help me clear and regenerate them?
|
||||||
|
```
|
||||||
|
|
||||||
|
### Handling changes
|
||||||
|
|
||||||
|
```
|
||||||
|
We've decided to use MongoDB instead of PostgreSQL. Can you update all future tasks to reflect this change?
|
||||||
|
```
|
||||||
|
|
||||||
|
### Completing work
|
||||||
|
|
||||||
|
```
|
||||||
|
I've finished implementing the authentication system described in task 2. All tests are passing.
|
||||||
|
Please mark it as complete and tell me what I should work on next.
|
||||||
|
```
|
||||||
|
|
||||||
|
### Analyzing complexity
|
||||||
|
|
||||||
|
```
|
||||||
|
Can you analyze the complexity of our tasks to help me understand which ones need to be broken down further?
|
||||||
|
```
|
||||||
|
|
||||||
|
### Viewing complexity report
|
||||||
|
|
||||||
|
```
|
||||||
|
Can you show me the complexity report in a more readable format?
|
||||||
|
```
|
||||||
|
|||||||
@@ -21,11 +21,9 @@ In an AI-driven development process—particularly with tools like [Cursor](http

The script can be configured through environment variables in a `.env` file at the root of the project:

### Required Configuration

- `ANTHROPIC_API_KEY`: Your Anthropic API key for Claude

### Optional Configuration

- `MODEL`: Specify which Claude model to use (default: "claude-3-7-sonnet-20250219")
- `MAX_TOKENS`: Maximum tokens for model responses (default: 4000)
- `TEMPERATURE`: Temperature for model responses (default: 0.7)
||||||
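For quick reference, a minimal `.env` sketch built from the variables documented in this guide might look like the following (the key values are placeholders, and only a few optional settings are shown):

```
# Required
ANTHROPIC_API_KEY=your-anthropic-api-key-here

# Optional (defaults shown)
MODEL=claude-3-7-sonnet-20250219
MAX_TOKENS=4000
TEMPERATURE=0.7
PERPLEXITY_API_KEY=your-perplexity-api-key-here
```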
@@ -40,10 +38,9 @@ The script can be configured through environment variables in a `.env` file at the root of the project:

## How It Works

1. **`tasks.json`**:

   - A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
   - The `meta` field can store additional info like the project's name, version, or reference to the PRD.
   - Tasks can have `subtasks` for more detailed implementation steps.
   - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.
@@ -53,7 +50,7 @@ The script can be configured through environment variables in a `.env` file at the root of the project:

```bash
# If installed globally
task-master [command] [options]

# If using locally within the project
node scripts/dev.js [command] [options]
```
@@ -114,7 +111,6 @@ task-master update --file=custom-tasks.json --from=5 --prompt="Change database f
```

Notes:

- The `--prompt` parameter is required and should explain the changes or new context
- Only tasks that aren't marked as 'done' will be updated
- Tasks with ID >= the specified --from value will be updated

@@ -138,7 +134,6 @@ task-master set-status --id=1,2,3 --status=done
```

Notes:

- When marking a parent task as "done", all of its subtasks will automatically be marked as "done" as well
- Common status values are 'done', 'pending', and 'deferred', but any string is accepted
- You can specify multiple task IDs by separating them with commas
@@ -188,7 +183,6 @@ task-master clear-subtasks --all
```

Notes:

- After clearing subtasks, task files are automatically regenerated
- This is useful when you want to regenerate subtasks with a different approach
- Can be combined with the `expand` command to immediately generate new subtasks

@@ -204,7 +198,6 @@ The script integrates with two AI services:

The Perplexity integration uses the OpenAI client to connect to Perplexity's API, which provides enhanced research capabilities for generating more informed subtasks. If the Perplexity API is unavailable or encounters an error, the script will automatically fall back to using Anthropic's Claude.

To use the Perplexity integration:

1. Obtain a Perplexity API key
2. Add `PERPLEXITY_API_KEY` to your `.env` file
3. Optionally specify `PERPLEXITY_MODEL` in your `.env` file (default: "sonar-medium-online")
@@ -213,7 +206,6 @@ To use the Perplexity integration:

## Logging

The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:

- `debug`: Detailed information, typically useful for troubleshooting
- `info`: Confirmation that things are working as expected (default)
- `warn`: Warning messages that don't prevent execution

@@ -236,20 +228,17 @@ task-master remove-dependency --id=<id> --depends-on=<id>

These commands:

1. **Allow precise dependency management**:

   - Add dependencies between tasks with automatic validation
   - Remove dependencies when they're no longer needed
   - Update task files automatically after changes

2. **Include validation checks** (see the sketch after this list):

   - Prevent circular dependencies (a task depending on itself)
   - Prevent duplicate dependencies
   - Verify that both tasks exist before adding/removing dependencies
   - Check if dependencies exist before attempting to remove them

3. **Provide clear feedback**:

   - Success messages confirm when dependencies are added/removed
   - Error messages explain why operations failed (if applicable)
||||||
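As a rough sketch of the kinds of checks listed above (illustrative only, not the project's actual implementation, and assuming an in-memory `tasks` array shaped like `tasks.json`):

```js
// Hypothetical validation for add-dependency; names are illustrative.
function validateNewDependency(tasks, taskId, dependsOnId) {
  const task = tasks.find((t) => t.id === taskId);
  const dependency = tasks.find((t) => t.id === dependsOnId);

  // Verify that both tasks exist before adding the dependency
  if (!task || !dependency) {
    return { ok: false, error: 'Both tasks must exist' };
  }
  // Prevent self-dependencies (a task depending on itself)
  if (taskId === dependsOnId) {
    return { ok: false, error: 'A task cannot depend on itself' };
  }
  // Prevent duplicate dependencies
  if ((task.dependencies || []).includes(dependsOnId)) {
    return { ok: false, error: 'Dependency already exists' };
  }
  return { ok: true };
}
```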
@@ -274,7 +263,6 @@ task-master validate-dependencies --file=custom-tasks.json
```

This command:

- Scans all tasks and subtasks for non-existent dependencies
- Identifies potential self-dependencies (tasks referencing themselves)
- Reports all found issues without modifying files

@@ -296,7 +284,6 @@ task-master fix-dependencies --file=custom-tasks.json
```

This command:

1. **Validates all dependencies** across tasks and subtasks
2. **Automatically removes**:
   - References to non-existent tasks and subtasks

@@ -334,7 +321,6 @@ task-master analyze-complexity --research
```

Notes:

- The command uses Claude to analyze each task's complexity (or Perplexity with --research flag)
- Tasks are scored on a scale of 1-10
- Each task receives a recommended number of subtasks based on DEFAULT_SUBTASKS configuration
@@ -359,35 +345,33 @@ task-master expand --id=8 --num=5 --prompt="Custom prompt"
|
|||||||
```
|
```
|
||||||
|
|
||||||
When a complexity report exists:
|
When a complexity report exists:
|
||||||
|
|
||||||
- The `expand` command will use the recommended subtask count from the report (unless overridden)
|
- The `expand` command will use the recommended subtask count from the report (unless overridden)
|
||||||
- It will use the tailored expansion prompt from the report (unless a custom prompt is provided)
|
- It will use the tailored expansion prompt from the report (unless a custom prompt is provided)
|
||||||
- When using `--all`, tasks are sorted by complexity score (highest first)
|
- When using `--all`, tasks are sorted by complexity score (highest first)
|
||||||
- The `--research` flag is preserved from the complexity analysis to expansion
|
- The `--research` flag is preserved from the complexity analysis to expansion
|
||||||
|
|
||||||
The output report structure is:
|
The output report structure is:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"meta": {
|
"meta": {
|
||||||
"generatedAt": "2023-06-15T12:34:56.789Z",
|
"generatedAt": "2023-06-15T12:34:56.789Z",
|
||||||
"tasksAnalyzed": 20,
|
"tasksAnalyzed": 20,
|
||||||
"thresholdScore": 5,
|
"thresholdScore": 5,
|
||||||
"projectName": "Your Project Name",
|
"projectName": "Your Project Name",
|
||||||
"usedResearch": true
|
"usedResearch": true
|
||||||
},
|
},
|
||||||
"complexityAnalysis": [
|
"complexityAnalysis": [
|
||||||
{
|
{
|
||||||
"taskId": 8,
|
"taskId": 8,
|
||||||
"taskTitle": "Develop Implementation Drift Handling",
|
"taskTitle": "Develop Implementation Drift Handling",
|
||||||
"complexityScore": 9.5,
|
"complexityScore": 9.5,
|
||||||
"recommendedSubtasks": 6,
|
"recommendedSubtasks": 6,
|
||||||
"expansionPrompt": "Create subtasks that handle detecting...",
|
"expansionPrompt": "Create subtasks that handle detecting...",
|
||||||
"reasoning": "This task requires sophisticated logic...",
|
"reasoning": "This task requires sophisticated logic...",
|
||||||
"expansionCommand": "task-master expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
|
"expansionCommand": "task-master expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
|
||||||
}
|
},
|
||||||
// More tasks sorted by complexity score (highest first)
|
// More tasks sorted by complexity score (highest first)
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -454,4 +438,4 @@ This command:
|
|||||||
- Commands for working with subtasks
|
- Commands for working with subtasks
|
||||||
- For subtasks, provides a link to view the parent task
|
- For subtasks, provides a link to view the parent task
|
||||||
|
|
||||||
This command is particularly useful when you need to examine a specific task in detail before implementing it or when you want to check the status and details of a particular task.
|
This command is particularly useful when you need to examine a specific task in detail before implementing it or when you want to check the status and details of a particular task.
|
||||||
30 bin/task-master-init.js Executable file
@@ -0,0 +1,30 @@
#!/usr/bin/env node

/**
 * Claude Task Master Init
 * Direct executable for the init command
 */

import { spawn } from 'child_process';
import { fileURLToPath } from 'url';
import { dirname, resolve } from 'path';

const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

// Get the path to the init script
const initScriptPath = resolve(__dirname, '../scripts/init.js');

// Pass through all arguments
const args = process.argv.slice(2);

// Spawn the init script with all arguments
const child = spawn('node', [initScriptPath, ...args], {
  stdio: 'inherit',
  cwd: process.cwd()
});

// Handle exit
child.on('close', (code) => {
  process.exit(code);
});
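For this file to be invocable as a command, the package's `package.json` presumably maps a bin name to it; an assumed shape (the actual bin entries of the published package may differ) would be:

```json
{
  "bin": {
    "task-master-init": "bin/task-master-init.js"
  }
}
```
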
@@ -44,36 +44,30 @@ const initScriptPath = resolve(__dirname, '../scripts/init.js');
|
|||||||
|
|
||||||
// Helper function to run dev.js with arguments
|
// Helper function to run dev.js with arguments
|
||||||
function runDevScript(args) {
|
function runDevScript(args) {
|
||||||
// Debug: Show the transformed arguments when DEBUG=1 is set
|
// Debug: Show the transformed arguments when DEBUG=1 is set
|
||||||
if (process.env.DEBUG === '1') {
|
if (process.env.DEBUG === '1') {
|
||||||
console.error('\nDEBUG - CLI Wrapper Analysis:');
|
console.error('\nDEBUG - CLI Wrapper Analysis:');
|
||||||
console.error('- Original command: ' + process.argv.join(' '));
|
console.error('- Original command: ' + process.argv.join(' '));
|
||||||
console.error('- Transformed args: ' + args.join(' '));
|
console.error('- Transformed args: ' + args.join(' '));
|
||||||
console.error(
|
console.error('- dev.js will receive: node ' + devScriptPath + ' ' + args.join(' ') + '\n');
|
||||||
'- dev.js will receive: node ' +
|
}
|
||||||
devScriptPath +
|
|
||||||
' ' +
|
// For testing: If TEST_MODE is set, just print args and exit
|
||||||
args.join(' ') +
|
if (process.env.TEST_MODE === '1') {
|
||||||
'\n'
|
console.log('Would execute:');
|
||||||
);
|
console.log(`node ${devScriptPath} ${args.join(' ')}`);
|
||||||
}
|
process.exit(0);
|
||||||
|
return;
|
||||||
// For testing: If TEST_MODE is set, just print args and exit
|
}
|
||||||
if (process.env.TEST_MODE === '1') {
|
|
||||||
console.log('Would execute:');
|
const child = spawn('node', [devScriptPath, ...args], {
|
||||||
console.log(`node ${devScriptPath} ${args.join(' ')}`);
|
stdio: 'inherit',
|
||||||
process.exit(0);
|
cwd: process.cwd()
|
||||||
return;
|
});
|
||||||
}
|
|
||||||
|
child.on('close', (code) => {
|
||||||
const child = spawn('node', [devScriptPath, ...args], {
|
process.exit(code);
|
||||||
stdio: 'inherit',
|
});
|
||||||
cwd: process.cwd()
|
|
||||||
});
|
|
||||||
|
|
||||||
child.on('close', (code) => {
|
|
||||||
process.exit(code);
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Helper function to detect camelCase and convert to kebab-case
|
// Helper function to detect camelCase and convert to kebab-case
|
||||||
@@ -85,239 +79,228 @@ const toKebabCase = (str) => str.replace(/([A-Z])/g, '-$1').toLowerCase();
|
|||||||
* @returns {Function} Wrapper action function
|
* @returns {Function} Wrapper action function
|
||||||
*/
|
*/
|
||||||
function createDevScriptAction(commandName) {
|
function createDevScriptAction(commandName) {
|
||||||
return (options, cmd) => {
|
return (options, cmd) => {
|
||||||
// Check for camelCase flags and error out with helpful message
|
// Check for camelCase flags and error out with helpful message
|
||||||
const camelCaseFlags = detectCamelCaseFlags(process.argv);
|
const camelCaseFlags = detectCamelCaseFlags(process.argv);
|
||||||
|
|
||||||
|
// If camelCase flags were found, show error and exit
|
||||||
|
if (camelCaseFlags.length > 0) {
|
||||||
|
console.error('\nError: Please use kebab-case for CLI flags:');
|
||||||
|
camelCaseFlags.forEach(flag => {
|
||||||
|
console.error(` Instead of: --${flag.original}`);
|
||||||
|
console.error(` Use: --${flag.kebabCase}`);
|
||||||
|
});
|
||||||
|
console.error('\nExample: task-master parse-prd --num-tasks=5 instead of --numTasks=5\n');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Since we've ensured no camelCase flags, we can now just:
|
||||||
|
// 1. Start with the command name
|
||||||
|
const args = [commandName];
|
||||||
|
|
||||||
|
// 3. Get positional arguments and explicit flags from the command line
|
||||||
|
const commandArgs = [];
|
||||||
|
const positionals = new Set(); // Track positional args we've seen
|
||||||
|
|
||||||
|
// Find the command in raw process.argv to extract args
|
||||||
|
const commandIndex = process.argv.indexOf(commandName);
|
||||||
|
if (commandIndex !== -1) {
|
||||||
|
// Process all args after the command name
|
||||||
|
for (let i = commandIndex + 1; i < process.argv.length; i++) {
|
||||||
|
const arg = process.argv[i];
|
||||||
|
|
||||||
|
if (arg.startsWith('--')) {
|
||||||
|
// It's a flag - pass through as is
|
||||||
|
commandArgs.push(arg);
|
||||||
|
// Skip the next arg if this is a flag with a value (not --flag=value format)
|
||||||
|
if (!arg.includes('=') &&
|
||||||
|
i + 1 < process.argv.length &&
|
||||||
|
!process.argv[i+1].startsWith('--')) {
|
||||||
|
commandArgs.push(process.argv[++i]);
|
||||||
|
}
|
||||||
|
} else if (!positionals.has(arg)) {
|
||||||
|
// It's a positional argument we haven't seen
|
||||||
|
commandArgs.push(arg);
|
||||||
|
positionals.add(arg);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add all command line args we collected
|
||||||
|
args.push(...commandArgs);
|
||||||
|
|
||||||
|
// 4. Add default options from Commander if not specified on command line
|
||||||
|
// Track which options we've seen on the command line
|
||||||
|
const userOptions = new Set();
|
||||||
|
for (const arg of commandArgs) {
|
||||||
|
if (arg.startsWith('--')) {
|
||||||
|
// Extract option name (without -- and value)
|
||||||
|
const name = arg.split('=')[0].slice(2);
|
||||||
|
userOptions.add(name);
|
||||||
|
|
||||||
|
// Add the kebab-case version too, to prevent duplicates
|
||||||
|
const kebabName = name.replace(/([A-Z])/g, '-$1').toLowerCase();
|
||||||
|
userOptions.add(kebabName);
|
||||||
|
|
||||||
|
// Add the camelCase version as well
|
||||||
|
const camelName = kebabName.replace(/-([a-z])/g, (_, letter) => letter.toUpperCase());
|
||||||
|
userOptions.add(camelName);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add Commander-provided defaults for options not specified by user
|
||||||
|
Object.entries(options).forEach(([key, value]) => {
|
||||||
|
// Debug output to see what keys we're getting
|
||||||
|
if (process.env.DEBUG === '1') {
|
||||||
|
console.error(`DEBUG - Processing option: ${key} = ${value}`);
|
||||||
|
}
|
||||||
|
|
||||||
// If camelCase flags were found, show error and exit
|
// Special case for numTasks > num-tasks (a known problem case)
|
||||||
if (camelCaseFlags.length > 0) {
|
if (key === 'numTasks') {
|
||||||
console.error('\nError: Please use kebab-case for CLI flags:');
|
if (process.env.DEBUG === '1') {
|
||||||
camelCaseFlags.forEach((flag) => {
|
console.error('DEBUG - Converting numTasks to num-tasks');
|
||||||
console.error(` Instead of: --${flag.original}`);
|
}
|
||||||
console.error(` Use: --${flag.kebabCase}`);
|
if (!userOptions.has('num-tasks') && !userOptions.has('numTasks')) {
|
||||||
});
|
args.push(`--num-tasks=${value}`);
|
||||||
console.error(
|
}
|
||||||
'\nExample: task-master parse-prd --num-tasks=5 instead of --numTasks=5\n'
|
return;
|
||||||
);
|
}
|
||||||
process.exit(1);
|
|
||||||
}
|
// Skip built-in Commander properties and options the user provided
|
||||||
|
if (['parent', 'commands', 'options', 'rawArgs'].includes(key) || userOptions.has(key)) {
|
||||||
// Since we've ensured no camelCase flags, we can now just:
|
return;
|
||||||
// 1. Start with the command name
|
}
|
||||||
const args = [commandName];
|
|
||||||
|
// Also check the kebab-case version of this key
|
||||||
// 3. Get positional arguments and explicit flags from the command line
|
const kebabKey = key.replace(/([A-Z])/g, '-$1').toLowerCase();
|
||||||
const commandArgs = [];
|
if (userOptions.has(kebabKey)) {
|
||||||
const positionals = new Set(); // Track positional args we've seen
|
return;
|
||||||
|
}
|
||||||
// Find the command in raw process.argv to extract args
|
|
||||||
const commandIndex = process.argv.indexOf(commandName);
|
// Add default values, using kebab-case for the parameter name
|
||||||
if (commandIndex !== -1) {
|
if (value !== undefined) {
|
||||||
// Process all args after the command name
|
if (typeof value === 'boolean') {
|
||||||
for (let i = commandIndex + 1; i < process.argv.length; i++) {
|
if (value === true) {
|
||||||
const arg = process.argv[i];
|
args.push(`--${kebabKey}`);
|
||||||
|
} else if (value === false && key === 'generate') {
|
||||||
if (arg.startsWith('--')) {
|
args.push('--skip-generate');
|
||||||
// It's a flag - pass through as is
|
}
|
||||||
commandArgs.push(arg);
|
} else {
|
||||||
// Skip the next arg if this is a flag with a value (not --flag=value format)
|
// Always use kebab-case for option names
|
||||||
if (
|
args.push(`--${kebabKey}=${value}`);
|
||||||
!arg.includes('=') &&
|
}
|
||||||
i + 1 < process.argv.length &&
|
}
|
||||||
!process.argv[i + 1].startsWith('--')
|
});
|
||||||
) {
|
|
||||||
commandArgs.push(process.argv[++i]);
|
// Special handling for parent parameter (uses -p)
|
||||||
}
|
if (options.parent && !args.includes('-p') && !userOptions.has('parent')) {
|
||||||
} else if (!positionals.has(arg)) {
|
args.push('-p', options.parent);
|
||||||
// It's a positional argument we haven't seen
|
}
|
||||||
commandArgs.push(arg);
|
|
||||||
positionals.add(arg);
|
// Debug output for troubleshooting
|
||||||
}
|
if (process.env.DEBUG === '1') {
|
||||||
}
|
console.error('DEBUG - Command args:', commandArgs);
|
||||||
}
|
console.error('DEBUG - User options:', Array.from(userOptions));
|
||||||
|
console.error('DEBUG - Commander options:', options);
|
||||||
// Add all command line args we collected
|
console.error('DEBUG - Final args:', args);
|
||||||
args.push(...commandArgs);
|
}
|
||||||
|
|
||||||
// 4. Add default options from Commander if not specified on command line
|
// Run the script with our processed args
|
||||||
// Track which options we've seen on the command line
|
runDevScript(args);
|
||||||
const userOptions = new Set();
|
};
|
||||||
for (const arg of commandArgs) {
|
|
||||||
if (arg.startsWith('--')) {
|
|
||||||
// Extract option name (without -- and value)
|
|
||||||
const name = arg.split('=')[0].slice(2);
|
|
||||||
userOptions.add(name);
|
|
||||||
|
|
||||||
// Add the kebab-case version too, to prevent duplicates
|
|
||||||
const kebabName = name.replace(/([A-Z])/g, '-$1').toLowerCase();
|
|
||||||
userOptions.add(kebabName);
|
|
||||||
|
|
||||||
// Add the camelCase version as well
|
|
||||||
const camelName = kebabName.replace(/-([a-z])/g, (_, letter) =>
|
|
||||||
letter.toUpperCase()
|
|
||||||
);
|
|
||||||
userOptions.add(camelName);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add Commander-provided defaults for options not specified by user
|
|
||||||
Object.entries(options).forEach(([key, value]) => {
|
|
||||||
// Debug output to see what keys we're getting
|
|
||||||
if (process.env.DEBUG === '1') {
|
|
||||||
console.error(`DEBUG - Processing option: ${key} = ${value}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Special case for numTasks > num-tasks (a known problem case)
|
|
||||||
if (key === 'numTasks') {
|
|
||||||
if (process.env.DEBUG === '1') {
|
|
||||||
console.error('DEBUG - Converting numTasks to num-tasks');
|
|
||||||
}
|
|
||||||
if (!userOptions.has('num-tasks') && !userOptions.has('numTasks')) {
|
|
||||||
args.push(`--num-tasks=${value}`);
|
|
||||||
}
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip built-in Commander properties and options the user provided
|
|
||||||
if (
|
|
||||||
['parent', 'commands', 'options', 'rawArgs'].includes(key) ||
|
|
||||||
userOptions.has(key)
|
|
||||||
) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Also check the kebab-case version of this key
|
|
||||||
const kebabKey = key.replace(/([A-Z])/g, '-$1').toLowerCase();
|
|
||||||
if (userOptions.has(kebabKey)) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add default values, using kebab-case for the parameter name
|
|
||||||
if (value !== undefined) {
|
|
||||||
if (typeof value === 'boolean') {
|
|
||||||
if (value === true) {
|
|
||||||
args.push(`--${kebabKey}`);
|
|
||||||
} else if (value === false && key === 'generate') {
|
|
||||||
args.push('--skip-generate');
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Always use kebab-case for option names
|
|
||||||
args.push(`--${kebabKey}=${value}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
// Special handling for parent parameter (uses -p)
|
|
||||||
if (options.parent && !args.includes('-p') && !userOptions.has('parent')) {
|
|
||||||
args.push('-p', options.parent);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debug output for troubleshooting
|
|
||||||
if (process.env.DEBUG === '1') {
|
|
||||||
console.error('DEBUG - Command args:', commandArgs);
|
|
||||||
console.error('DEBUG - User options:', Array.from(userOptions));
|
|
||||||
console.error('DEBUG - Commander options:', options);
|
|
||||||
console.error('DEBUG - Final args:', args);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run the script with our processed args
|
|
||||||
runDevScript(args);
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// // Special case for the 'init' command which uses a different script
|
// Special case for the 'init' command which uses a different script
|
||||||
// function registerInitCommand(program) {
|
function registerInitCommand(program) {
|
||||||
// program
|
program
|
||||||
// .command('init')
|
.command('init')
|
||||||
// .description('Initialize a new project')
|
.description('Initialize a new project')
|
||||||
// .option('-y, --yes', 'Skip prompts and use default values')
|
.option('-y, --yes', 'Skip prompts and use default values')
|
||||||
// .option('-n, --name <name>', 'Project name')
|
.option('-n, --name <name>', 'Project name')
|
||||||
// .option('-d, --description <description>', 'Project description')
|
.option('-d, --description <description>', 'Project description')
|
||||||
// .option('-v, --version <version>', 'Project version')
|
.option('-v, --version <version>', 'Project version')
|
||||||
// .option('-a, --author <author>', 'Author name')
|
.option('-a, --author <author>', 'Author name')
|
||||||
// .option('--skip-install', 'Skip installing dependencies')
|
.option('--skip-install', 'Skip installing dependencies')
|
||||||
// .option('--dry-run', 'Show what would be done without making changes')
|
.option('--dry-run', 'Show what would be done without making changes')
|
||||||
// .action((options) => {
|
.action((options) => {
|
||||||
// // Pass through any options to the init script
|
// Pass through any options to the init script
|
||||||
// const args = [
|
const args = ['--yes', 'name', 'description', 'version', 'author', 'skip-install', 'dry-run']
|
||||||
// '--yes',
|
.filter(opt => options[opt])
|
||||||
// 'name',
|
.map(opt => {
|
||||||
// 'description',
|
if (opt === 'yes' || opt === 'skip-install' || opt === 'dry-run') {
|
||||||
// 'version',
|
return `--${opt}`;
|
||||||
// 'author',
|
}
|
||||||
// 'skip-install',
|
return `--${opt}=${options[opt]}`;
|
||||||
// 'dry-run'
|
});
|
||||||
// ]
|
|
||||||
// .filter((opt) => options[opt])
|
const child = spawn('node', [initScriptPath, ...args], {
|
||||||
// .map((opt) => {
|
stdio: 'inherit',
|
||||||
// if (opt === 'yes' || opt === 'skip-install' || opt === 'dry-run') {
|
cwd: process.cwd()
|
||||||
// return `--${opt}`;
|
});
|
||||||
// }
|
|
||||||
// return `--${opt}=${options[opt]}`;
|
child.on('close', (code) => {
|
||||||
// });
|
process.exit(code);
|
||||||
|
});
|
||||||
// const child = spawn('node', [initScriptPath, ...args], {
|
});
|
||||||
// stdio: 'inherit',
|
}
|
||||||
// cwd: process.cwd()
|
|
||||||
// });
|
|
||||||
|
|
||||||
// child.on('close', (code) => {
|
|
||||||
// process.exit(code);
|
|
||||||
// });
|
|
||||||
// });
|
|
||||||
// }
|
|
||||||
|
|
||||||
// Set up the command-line interface
|
// Set up the command-line interface
|
||||||
const program = new Command();
|
const program = new Command();
|
||||||
|
|
||||||
program
|
program
|
||||||
.name('task-master')
|
.name('task-master')
|
||||||
.description('Claude Task Master CLI')
|
.description('Claude Task Master CLI')
|
||||||
.version(version)
|
.version(version)
|
||||||
.addHelpText('afterAll', () => {
|
.addHelpText('afterAll', () => {
|
||||||
// Use the same help display function as dev.js for consistency
|
// Use the same help display function as dev.js for consistency
|
||||||
displayHelp();
|
displayHelp();
|
||||||
return ''; // Return empty string to prevent commander's default help
|
return ''; // Return empty string to prevent commander's default help
|
||||||
});
|
});
|
||||||
|
|
||||||
// Add custom help option to directly call our help display
|
// Add custom help option to directly call our help display
|
||||||
program.helpOption('-h, --help', 'Display help information');
|
program.helpOption('-h, --help', 'Display help information');
|
||||||
program.on('--help', () => {
|
program.on('--help', () => {
|
||||||
displayHelp();
|
displayHelp();
|
||||||
});
|
});
|
||||||
|
|
||||||
// // Add special case commands
|
// Add special case commands
|
||||||
// registerInitCommand(program);
|
registerInitCommand(program);
|
||||||
|
|
||||||
program
|
program
|
||||||
.command('dev')
|
.command('dev')
|
||||||
.description('Run the dev.js script')
|
.description('Run the dev.js script')
|
||||||
.action(() => {
|
.action(() => {
|
||||||
const args = process.argv.slice(process.argv.indexOf('dev') + 1);
|
const args = process.argv.slice(process.argv.indexOf('dev') + 1);
|
||||||
runDevScript(args);
|
runDevScript(args);
|
||||||
});
|
});
|
||||||
|
|
||||||
// Use a temporary Command instance to get all command definitions
|
// Use a temporary Command instance to get all command definitions
|
||||||
const tempProgram = new Command();
|
const tempProgram = new Command();
|
||||||
registerCommands(tempProgram);
|
registerCommands(tempProgram);
|
||||||
|
|
||||||
// For each command in the temp instance, add a modified version to our actual program
|
// For each command in the temp instance, add a modified version to our actual program
|
||||||
tempProgram.commands.forEach((cmd) => {
|
tempProgram.commands.forEach(cmd => {
|
||||||
if (['dev'].includes(cmd.name())) {
|
if (['init', 'dev'].includes(cmd.name())) {
|
||||||
// Skip commands we've already defined specially
|
// Skip commands we've already defined specially
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a new command with the same name and description
|
// Create a new command with the same name and description
|
||||||
const newCmd = program.command(cmd.name()).description(cmd.description());
|
const newCmd = program
|
||||||
|
.command(cmd.name())
|
||||||
// Copy all options
|
.description(cmd.description());
|
||||||
cmd.options.forEach((opt) => {
|
|
||||||
newCmd.option(opt.flags, opt.description, opt.defaultValue);
|
// Copy all options
|
||||||
});
|
cmd.options.forEach(opt => {
|
||||||
|
newCmd.option(
|
||||||
// Set the action to proxy to dev.js
|
opt.flags,
|
||||||
newCmd.action(createDevScriptAction(cmd.name()));
|
opt.description,
|
||||||
|
opt.defaultValue
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Set the action to proxy to dev.js
|
||||||
|
newCmd.action(createDevScriptAction(cmd.name()));
|
||||||
});
|
});
|
||||||
|
|
||||||
// Parse the command line arguments
|
// Parse the command line arguments
|
||||||
@@ -325,56 +308,47 @@ program.parse(process.argv);
|
|||||||
|
|
||||||
// Add global error handling for unknown commands and options
|
// Add global error handling for unknown commands and options
|
||||||
process.on('uncaughtException', (err) => {
|
process.on('uncaughtException', (err) => {
|
||||||
// Check if this is a commander.js unknown option error
|
// Check if this is a commander.js unknown option error
|
||||||
if (err.code === 'commander.unknownOption') {
|
if (err.code === 'commander.unknownOption') {
|
||||||
const option = err.message.match(/'([^']+)'/)?.[1];
|
const option = err.message.match(/'([^']+)'/)?.[1];
|
||||||
const commandArg = process.argv.find(
|
const commandArg = process.argv.find(arg => !arg.startsWith('-') &&
|
||||||
(arg) =>
|
arg !== 'task-master' &&
|
||||||
!arg.startsWith('-') &&
|
!arg.includes('/') &&
|
||||||
arg !== 'task-master' &&
|
arg !== 'node');
|
||||||
!arg.includes('/') &&
|
const command = commandArg || 'unknown';
|
||||||
arg !== 'node'
|
|
||||||
);
|
console.error(chalk.red(`Error: Unknown option '${option}'`));
|
||||||
const command = commandArg || 'unknown';
|
console.error(chalk.yellow(`Run 'task-master ${command} --help' to see available options for this command`));
|
||||||
|
process.exit(1);
|
||||||
console.error(chalk.red(`Error: Unknown option '${option}'`));
|
}
|
||||||
console.error(
|
|
||||||
chalk.yellow(
|
// Check if this is a commander.js unknown command error
|
||||||
`Run 'task-master ${command} --help' to see available options for this command`
|
if (err.code === 'commander.unknownCommand') {
|
||||||
)
|
const command = err.message.match(/'([^']+)'/)?.[1];
|
||||||
);
|
|
||||||
process.exit(1);
|
console.error(chalk.red(`Error: Unknown command '${command}'`));
|
||||||
}
|
console.error(chalk.yellow(`Run 'task-master --help' to see available commands`));
|
||||||
|
process.exit(1);
|
||||||
// Check if this is a commander.js unknown command error
|
}
|
||||||
if (err.code === 'commander.unknownCommand') {
|
|
||||||
const command = err.message.match(/'([^']+)'/)?.[1];
|
// Handle other uncaught exceptions
|
||||||
|
console.error(chalk.red(`Error: ${err.message}`));
|
||||||
console.error(chalk.red(`Error: Unknown command '${command}'`));
|
if (process.env.DEBUG === '1') {
|
||||||
console.error(
|
console.error(err);
|
||||||
chalk.yellow(`Run 'task-master --help' to see available commands`)
|
}
|
||||||
);
|
process.exit(1);
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle other uncaught exceptions
|
|
||||||
console.error(chalk.red(`Error: ${err.message}`));
|
|
||||||
if (process.env.DEBUG === '1') {
|
|
||||||
console.error(err);
|
|
||||||
}
|
|
||||||
process.exit(1);
|
|
||||||
});
|
});
|
||||||
|
|
||||||
// Show help if no command was provided (just 'task-master' with no args)
|
// Show help if no command was provided (just 'task-master' with no args)
|
||||||
if (process.argv.length <= 2) {
|
if (process.argv.length <= 2) {
|
||||||
displayBanner();
|
displayBanner();
|
||||||
displayHelp();
|
displayHelp();
|
||||||
process.exit(0);
|
process.exit(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add exports at the end of the file
|
// Add exports at the end of the file
|
||||||
if (typeof module !== 'undefined') {
|
if (typeof module !== 'undefined') {
|
||||||
module.exports = {
|
module.exports = {
|
||||||
detectCamelCaseFlags
|
detectCamelCaseFlags
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
File diff suppressed because it is too large
@@ -41,39 +41,39 @@ Core functions should follow this pattern to support both CLI and MCP use:
 * @returns {Object|undefined} - Returns data when source is 'mcp'
 */
function exampleFunction(param1, param2, options = {}) {
  try {
    // Skip UI for MCP
    if (options.source !== 'mcp') {
      displayBanner();
      console.log(chalk.blue('Processing operation...'));
    }

    // Do the core business logic
    const result = doSomething(param1, param2);

    // For MCP, return structured data
    if (options.source === 'mcp') {
      return {
        success: true,
        data: result
      };
    }

    // For CLI, display output
    console.log(chalk.green('Operation completed successfully!'));
  } catch (error) {
    // Handle errors based on source
    if (options.source === 'mcp') {
      return {
        success: false,
        error: error.message
      };
    }

    // CLI error handling
    console.error(chalk.red(`Error: ${error.message}`));
    process.exit(1);
  }
}
```

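To make the dual-interface contract above concrete, a hypothetical caller for each side might look like this (`exampleFunction` is the illustrative pattern function, not an exported API):

```javascript
// CLI path: shows the banner, prints progress, exits the process on error
exampleFunction('tasks.json', 42);

// MCP path: no console UI; always get a structured result back
const result = exampleFunction('tasks.json', 42, { source: 'mcp' });
if (result.success) {
  console.log(result.data);
} else {
  console.error(result.error);
}
```
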
@@ -89,17 +89,17 @@ export const simpleFunction = adaptForMcp(originalFunction);

// Split implementation - completely different code paths for CLI vs MCP
export const complexFunction = sourceSplitFunction(
  // CLI version with UI
  function (param1, param2) {
    displayBanner();
    console.log(`Processing ${param1}...`);
    // ... CLI implementation
  },
  // MCP version with structured return
  function (param1, param2, options = {}) {
    // ... MCP implementation
    return { success: true, data };
  }
);
```

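The `adaptForMcp` and `sourceSplitFunction` helpers are referenced but not defined in this hunk. A minimal sketch of what such helpers could look like, inferred purely from how they are called above (assumed implementations, not the repository's actual code):

```javascript
// Assumed helper implementations — illustrative only.

// Route to the CLI or MCP implementation based on options.source.
export function sourceSplitFunction(cliImpl, mcpImpl) {
  return (...args) => {
    const options = args[args.length - 1];
    const isMcp = options && typeof options === 'object' && options.source === 'mcp';
    return isMcp ? mcpImpl(...args) : cliImpl(...args);
  };
}

// Wrap a CLI-oriented function so MCP callers get { success, data } / { success, error }.
export function adaptForMcp(fn) {
  return (...args) => {
    const options = args[args.length - 1];
    if (!options || options.source !== 'mcp') {
      return fn(...args);
    }
    try {
      return { success: true, data: fn(...args) };
    } catch (error) {
      return { success: false, error: error.message };
    }
  };
}
```
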
@@ -110,7 +110,7 @@ When adding new features, follow these steps to ensure CLI and MCP compatibility
1. **Implement Core Logic** in the appropriate module file
2. **Add Source Parameter Support** using the pattern above
3. **Add to task-master-core.js** to make it available for direct import
4. **Update Command Map** in `mcp-server/src/tools/utils.js`
5. **Create Tool Implementation** in `mcp-server/src/tools/`
6. **Register the Tool** in `mcp-server/src/tools/index.js`

@@ -119,39 +119,39 @@ When adding new features, follow these steps to ensure CLI and MCP compatibility
```javascript
// In scripts/modules/task-manager.js
export async function newFeature(param1, param2, options = {}) {
  try {
    // Source-specific UI
    if (options.source !== 'mcp') {
      displayBanner();
      console.log(chalk.blue('Running new feature...'));
    }

    // Shared core logic
    const result = processFeature(param1, param2);

    // Source-specific return handling
    if (options.source === 'mcp') {
      return {
        success: true,
        data: result
      };
    }

    // CLI output
    console.log(chalk.green('Feature completed successfully!'));
    displayOutput(result);
  } catch (error) {
    // Error handling based on source
    if (options.source === 'mcp') {
      return {
        success: false,
        error: error.message
      };
    }

    console.error(chalk.red(`Error: ${error.message}`));
    process.exit(1);
  }
}
```

@@ -163,12 +163,12 @@ import { newFeature } from '../../../scripts/modules/task-manager.js';

// Add to exports
export default {
  // ... existing functions

  async newFeature(args = {}, options = {}) {
    const { param1, param2 } = args;
    return executeFunction(newFeature, [param1, param2], options);
  }
};
```

@@ -177,8 +177,8 @@ export default {
```javascript
// In mcp-server/src/tools/utils.js
const commandMap = {
  // ... existing mappings
  'new-feature': 'newFeature'
};
```

@@ -186,53 +186,53 @@ const commandMap = {

```javascript
// In mcp-server/src/tools/newFeature.js
import { z } from 'zod';
import {
  executeTaskMasterCommand,
  createContentResponse,
  createErrorResponse
} from './utils.js';

export function registerNewFeatureTool(server) {
  server.addTool({
    name: 'newFeature',
    description: 'Run the new feature',
    parameters: z.object({
      param1: z.string().describe('First parameter'),
      param2: z.number().optional().describe('Second parameter'),
      file: z.string().optional().describe('Path to the tasks file'),
      projectRoot: z.string().describe('Root directory of the project')
    }),
    execute: async (args, { log }) => {
      try {
        log.info(`Running new feature with args: ${JSON.stringify(args)}`);

        const cmdArgs = [];
        if (args.param1) cmdArgs.push(`--param1=${args.param1}`);
        if (args.param2) cmdArgs.push(`--param2=${args.param2}`);
        if (args.file) cmdArgs.push(`--file=${args.file}`);

        const projectRoot = args.projectRoot;

        // Execute the command
        const result = await executeTaskMasterCommand(
          'new-feature',
          log,
          cmdArgs,
          projectRoot
        );

        if (!result.success) {
          throw new Error(result.error);
        }

        return createContentResponse(result.stdout);
      } catch (error) {
        log.error(`Error in new feature: ${error.message}`);
        return createErrorResponse(`Error in new feature: ${error.message}`);
      }
    }
  });
}
```

@@ -240,11 +240,11 @@ export function registerNewFeatureTool(server) {

```javascript
// In mcp-server/src/tools/index.js
import { registerNewFeatureTool } from './newFeature.js';

export function registerTaskMasterTools(server) {
  // ... existing registrations
  registerNewFeatureTool(server);
}
```

@@ -266,4 +266,4 @@ node mcp-server/tests/test-command.js newFeature
2. **Structured Data for MCP** - Return clean JSON objects from MCP source functions
3. **Consistent Error Handling** - Standardize error formats for both interfaces
4. **Documentation** - Update MCP tool documentation when adding new features
5. **Testing** - Test both CLI and MCP interfaces for any new or modified feature
@@ -1,22 +0,0 @@
# Task Master Documentation

Welcome to the Task Master documentation. Use the links below to navigate to the information you need:

## Getting Started

- [Configuration Guide](configuration.md) - Set up environment variables and customize Task Master
- [Tutorial](tutorial.md) - Step-by-step guide to getting started with Task Master

## Reference

- [Command Reference](command-reference.md) - Complete list of all available commands
- [Task Structure](task-structure.md) - Understanding the task format and features

## Examples & Licensing

- [Example Interactions](examples.md) - Common Cursor AI interaction examples
- [Licensing Information](licensing.md) - Detailed information about the license

## Need More Help?

If you can't find what you're looking for in these docs, please check the [main README](../README.md) or visit our [GitHub repository](https://github.com/eyaltoledano/claude-task-master).
@@ -6,55 +6,57 @@ This document provides examples of how to use the new AI client utilities with A

```javascript
// In your direct function implementation:
import {
  getAnthropicClientForMCP,
  getModelConfig,
  handleClaudeError
} from '../utils/ai-client-utils.js';

export async function someAiOperationDirect(args, log, context) {
  try {
    // Initialize Anthropic client with session from context
    const client = getAnthropicClientForMCP(context.session, log);

    // Get model configuration with defaults or session overrides
    const modelConfig = getModelConfig(context.session);

    // Make API call with proper error handling
    try {
      const response = await client.messages.create({
        model: modelConfig.model,
        max_tokens: modelConfig.maxTokens,
        temperature: modelConfig.temperature,
        messages: [{ role: 'user', content: 'Your prompt here' }]
      });

      return {
        success: true,
        data: response
      };
    } catch (apiError) {
      // Use helper to get user-friendly error message
      const friendlyMessage = handleClaudeError(apiError);

      return {
        success: false,
        error: {
          code: 'AI_API_ERROR',
          message: friendlyMessage
        }
      };
    }
  } catch (error) {
    // Handle client initialization errors
    return {
      success: false,
      error: {
        code: 'AI_CLIENT_ERROR',
        message: error.message
      }
    };
  }
}
```

@@ -62,85 +64,86 @@ export async function someAiOperationDirect(args, log, context) {

```javascript
// In your MCP tool implementation:
import {
  AsyncOperationManager,
  StatusCodes
} from '../../utils/async-operation-manager.js';
import { someAiOperationDirect } from '../../core/direct-functions/some-ai-operation.js';

export async function someAiOperation(args, context) {
  const { session, mcpLog } = context;
  const log = mcpLog || console;

  try {
    // Create operation description
    const operationDescription = `AI operation: ${args.someParam}`;

    // Start async operation
    const operation = AsyncOperationManager.createOperation(
      operationDescription,
      async (reportProgress) => {
        try {
          // Initial progress report
          reportProgress({
            progress: 0,
            status: 'Starting AI operation...'
          });

          // Call direct function with session and progress reporting
          const result = await someAiOperationDirect(args, log, {
            reportProgress,
            mcpLog: log,
            session
          });

          // Final progress update
          reportProgress({
            progress: 100,
            status: result.success ? 'Operation completed' : 'Operation failed',
            result: result.data,
            error: result.error
          });

          return result;
        } catch (error) {
          // Handle errors in the operation
          reportProgress({
            progress: 100,
            status: 'Operation failed',
            error: {
              message: error.message,
              code: error.code || 'OPERATION_FAILED'
            }
          });
          throw error;
        }
      }
    );

    // Return immediate response with operation ID
    return {
      status: StatusCodes.ACCEPTED,
      body: {
        success: true,
        message: 'Operation started',
        operationId: operation.id
      }
    };
  } catch (error) {
    // Handle errors in the MCP tool
    log.error(`Error in someAiOperation: ${error.message}`);
    return {
      status: StatusCodes.INTERNAL_SERVER_ERROR,
      body: {
        success: false,
        error: {
          code: 'OPERATION_FAILED',
          message: error.message
        }
      }
    };
  }
}
```

@@ -148,56 +151,58 @@ export async function someAiOperation(args, context) {

```javascript
// In your direct function:
import {
  getPerplexityClientForMCP,
  getBestAvailableAIModel
} from '../utils/ai-client-utils.js';

export async function researchOperationDirect(args, log, context) {
  try {
    // Get the best AI model for this operation based on needs
    const { type, client } = await getBestAvailableAIModel(
      context.session,
      { requiresResearch: true },
      log
    );

    // Report which model we're using
    if (context.reportProgress) {
      await context.reportProgress({
        progress: 10,
        status: `Using ${type} model for research...`
      });
    }

    // Make API call based on the model type
    if (type === 'perplexity') {
      // Call Perplexity
      const response = await client.chat.completions.create({
        model: context.session?.env?.PERPLEXITY_MODEL || 'sonar-medium-online',
        messages: [{ role: 'user', content: args.researchQuery }],
        temperature: 0.1
      });

      return {
        success: true,
        data: response.choices[0].message.content
      };
    } else {
      // Call Claude as fallback
      // (Implementation depends on specific needs)
      // ...
    }
  } catch (error) {
    // Handle errors
    return {
      success: false,
      error: {
        code: 'RESEARCH_ERROR',
        message: error.message
      }
    };
  }
}
```

@@ -209,9 +214,9 @@ import { getModelConfig } from '../utils/ai-client-utils.js';

// Using custom defaults for a specific operation
const operationDefaults = {
  model: 'claude-3-haiku-20240307', // Faster, smaller model
  maxTokens: 1000, // Lower token limit
  temperature: 0.2 // Lower temperature for more deterministic output
};

// Get model config with operation-specific defaults
@@ -219,34 +224,30 @@ const modelConfig = getModelConfig(context.session, operationDefaults);

// Now use modelConfig in your API calls
const response = await client.messages.create({
  model: modelConfig.model,
  max_tokens: modelConfig.maxTokens,
  temperature: modelConfig.temperature
  // Other parameters...
});
```

## Best Practices

1. **Error Handling**:

   - Always use try/catch blocks around both client initialization and API calls
   - Use `handleClaudeError` to provide user-friendly error messages
   - Return standardized error objects with code and message

2. **Progress Reporting**:

   - Report progress at key points (starting, processing, completing)
   - Include meaningful status messages
   - Include error details in progress reports when failures occur

3. **Session Handling**:

   - Always pass the session from the context to the AI client getters
   - Use `getModelConfig` to respect user settings from session

4. **Model Selection**:

   - Use `getBestAvailableAIModel` when you need to select between different models
   - Set `requiresResearch: true` when you need Perplexity capabilities

@@ -254,4 +255,4 @@ const response = await client.messages.create({
- Create descriptive operation names
- Handle all errors within the operation function
- Return standardized results from direct functions
- Return immediate responses with operation IDs
@@ -1,205 +0,0 @@
# Task Master Command Reference

Here's a comprehensive reference of all available commands:

## Parse PRD

```bash
# Parse a PRD file and generate tasks
task-master parse-prd <prd-file.txt>

# Limit the number of tasks generated
task-master parse-prd <prd-file.txt> --num-tasks=10
```

## List Tasks

```bash
# List all tasks
task-master list

# List tasks with a specific status
task-master list --status=<status>

# List tasks with subtasks
task-master list --with-subtasks

# List tasks with a specific status and include subtasks
task-master list --status=<status> --with-subtasks
```

## Show Next Task

```bash
# Show the next task to work on based on dependencies and status
task-master next
```

## Show Specific Task

```bash
# Show details of a specific task
task-master show <id>
# or
task-master show --id=<id>

# View a specific subtask (e.g., subtask 2 of task 1)
task-master show 1.2
```

## Update Tasks

```bash
# Update tasks from a specific ID and provide context
task-master update --from=<id> --prompt="<prompt>"
```

## Update a Specific Task

```bash
# Update a single task by ID with new information
task-master update-task --id=<id> --prompt="<prompt>"

# Use research-backed updates with Perplexity AI
task-master update-task --id=<id> --prompt="<prompt>" --research
```

## Update a Subtask

```bash
# Append additional information to a specific subtask
task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>"

# Example: Add details about API rate limiting to subtask 2 of task 5
task-master update-subtask --id=5.2 --prompt="Add rate limiting of 100 requests per minute"

# Use research-backed updates with Perplexity AI
task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>" --research
```

Unlike the `update-task` command which replaces task information, the `update-subtask` command _appends_ new information to the existing subtask details, marking it with a timestamp. This is useful for iteratively enhancing subtasks while preserving the original content.

## Generate Task Files

```bash
# Generate individual task files from tasks.json
task-master generate
```

## Set Task Status

```bash
# Set status of a single task
task-master set-status --id=<id> --status=<status>

# Set status for multiple tasks
task-master set-status --id=1,2,3 --status=<status>

# Set status for subtasks
task-master set-status --id=1.1,1.2 --status=<status>
```

When marking a task as "done", all of its subtasks will automatically be marked as "done" as well.

## Expand Tasks

```bash
# Expand a specific task with subtasks
task-master expand --id=<id> --num=<number>

# Expand with additional context
task-master expand --id=<id> --prompt="<context>"

# Expand all pending tasks
task-master expand --all

# Force regeneration of subtasks for tasks that already have them
task-master expand --all --force

# Research-backed subtask generation for a specific task
task-master expand --id=<id> --research

# Research-backed generation for all tasks
task-master expand --all --research
```

## Clear Subtasks

```bash
# Clear subtasks from a specific task
task-master clear-subtasks --id=<id>

# Clear subtasks from multiple tasks
task-master clear-subtasks --id=1,2,3

# Clear subtasks from all tasks
task-master clear-subtasks --all
```

## Analyze Task Complexity

```bash
# Analyze complexity of all tasks
task-master analyze-complexity

# Save report to a custom location
task-master analyze-complexity --output=my-report.json

# Use a specific LLM model
task-master analyze-complexity --model=claude-3-opus-20240229

# Set a custom complexity threshold (1-10)
task-master analyze-complexity --threshold=6

# Use an alternative tasks file
task-master analyze-complexity --file=custom-tasks.json

# Use Perplexity AI for research-backed complexity analysis
task-master analyze-complexity --research
```

## View Complexity Report

```bash
# Display the task complexity analysis report
task-master complexity-report

# View a report at a custom location
task-master complexity-report --file=my-report.json
```

## Managing Task Dependencies

```bash
# Add a dependency to a task
task-master add-dependency --id=<id> --depends-on=<id>

# Remove a dependency from a task
task-master remove-dependency --id=<id> --depends-on=<id>

# Validate dependencies without fixing them
task-master validate-dependencies

# Find and fix invalid dependencies automatically
task-master fix-dependencies
```

## Add a New Task

```bash
# Add a new task using AI
task-master add-task --prompt="Description of the new task"

# Add a task with dependencies
task-master add-task --prompt="Description" --dependencies=1,2,3

# Add a task with priority
task-master add-task --prompt="Description" --priority=high
```

## Initialize a Project

```bash
# Initialize a new project with Task Master structure
task-master init
```
@@ -1,65 +0,0 @@
# Configuration

Task Master can be configured through environment variables in a `.env` file at the root of your project.

## Required Configuration

- `ANTHROPIC_API_KEY`: Your Anthropic API key for Claude (Example: `ANTHROPIC_API_KEY=sk-ant-api03-...`)

## Optional Configuration

- `MODEL` (Default: `"claude-3-7-sonnet-20250219"`): Claude model to use (Example: `MODEL=claude-3-opus-20240229`)
- `MAX_TOKENS` (Default: `"4000"`): Maximum tokens for responses (Example: `MAX_TOKENS=8000`)
- `TEMPERATURE` (Default: `"0.7"`): Temperature for model responses (Example: `TEMPERATURE=0.5`)
- `DEBUG` (Default: `"false"`): Enable debug logging (Example: `DEBUG=true`)
- `LOG_LEVEL` (Default: `"info"`): Console output level (Example: `LOG_LEVEL=debug`)
- `DEFAULT_SUBTASKS` (Default: `"3"`): Default subtask count (Example: `DEFAULT_SUBTASKS=5`)
- `DEFAULT_PRIORITY` (Default: `"medium"`): Default priority (Example: `DEFAULT_PRIORITY=high`)
- `PROJECT_NAME` (Default: `"MCP SaaS MVP"`): Project name in metadata (Example: `PROJECT_NAME=My Awesome Project`)
- `PROJECT_VERSION` (Default: `"1.0.0"`): Version in metadata (Example: `PROJECT_VERSION=2.1.0`)
- `PERPLEXITY_API_KEY`: For research-backed features (Example: `PERPLEXITY_API_KEY=pplx-...`)
- `PERPLEXITY_MODEL` (Default: `"sonar-medium-online"`): Perplexity model (Example: `PERPLEXITY_MODEL=sonar-large-online`)

## Example .env File

```
# Required
ANTHROPIC_API_KEY=sk-ant-api03-your-api-key

# Optional - Claude Configuration
MODEL=claude-3-7-sonnet-20250219
MAX_TOKENS=4000
TEMPERATURE=0.7

# Optional - Perplexity API for Research
PERPLEXITY_API_KEY=pplx-your-api-key
PERPLEXITY_MODEL=sonar-medium-online

# Optional - Project Info
PROJECT_NAME=My Project
PROJECT_VERSION=1.0.0

# Optional - Application Configuration
DEFAULT_SUBTASKS=3
DEFAULT_PRIORITY=medium
DEBUG=false
LOG_LEVEL=info
```

## Troubleshooting

### If `task-master init` doesn't respond:

Try running it with Node directly:

```bash
node node_modules/claude-task-master/scripts/init.js
```

Or clone the repository and run:

```bash
git clone https://github.com/eyaltoledano/claude-task-master.git
cd claude-task-master
node scripts/init.js
```
@@ -1,53 +0,0 @@
# Example Cursor AI Interactions

Here are some common interactions with Cursor AI when using Task Master:

## Starting a new project

```
I've just initialized a new project with Claude Task Master. I have a PRD at scripts/prd.txt.
Can you help me parse it and set up the initial tasks?
```

## Working on tasks

```
What's the next task I should work on? Please consider dependencies and priorities.
```

## Implementing a specific task

```
I'd like to implement task 4. Can you help me understand what needs to be done and how to approach it?
```

## Managing subtasks

```
I need to regenerate the subtasks for task 3 with a different approach. Can you help me clear and regenerate them?
```

## Handling changes

```
We've decided to use MongoDB instead of PostgreSQL. Can you update all future tasks to reflect this change?
```

## Completing work

```
I've finished implementing the authentication system described in task 2. All tests are passing.
Please mark it as complete and tell me what I should work on next.
```

## Analyzing complexity

```
Can you analyze the complexity of our tasks to help me understand which ones need to be broken down further?
```

## Viewing complexity report

```
Can you show me the complexity report in a more readable format?
```
@@ -1,18 +0,0 @@
# Licensing

Task Master is licensed under the MIT License with Commons Clause. This means you can:

## ✅ Allowed:

- Use Task Master for any purpose (personal, commercial, academic)
- Modify the code
- Distribute copies
- Create and sell products built using Task Master

## ❌ Not Allowed:

- Sell Task Master itself
- Offer Task Master as a hosted service
- Create competing products based on Task Master

See the [LICENSE](../LICENSE) file for the complete license text.
2128 docs/mcp-protocol-schema-03262025.json Normal file
File diff suppressed because it is too large
@@ -1,139 +0,0 @@
# Task Structure

Tasks in Task Master follow a specific format designed to provide comprehensive information for both humans and AI assistants.

## Task Fields in tasks.json

Tasks in tasks.json have the following structure:

- `id`: Unique identifier for the task (Example: `1`)
- `title`: Brief, descriptive title of the task (Example: `"Initialize Repo"`)
- `description`: Concise description of what the task involves (Example: `"Create a new repository, set up initial structure."`)
- `status`: Current state of the task (Example: `"pending"`, `"done"`, `"deferred"`)
- `dependencies`: IDs of tasks that must be completed before this task (Example: `[1, 2]`)
  - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending)
  - This helps quickly identify which prerequisite tasks are blocking work
- `priority`: Importance level of the task (Example: `"high"`, `"medium"`, `"low"`)
- `details`: In-depth implementation instructions (Example: `"Use GitHub client ID/secret, handle callback, set session token."`)
- `testStrategy`: Verification approach (Example: `"Deploy and call endpoint to confirm 'Hello World' response."`)
- `subtasks`: List of smaller, more specific tasks that make up the main task (Example: `[{"id": 1, "title": "Configure OAuth", ...}]`)
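Taken together, a single entry with these fields might look like the sketch below. It is shown as a JavaScript object literal for readability (the on-disk `tasks.json` is plain JSON), and every value is hypothetical rather than taken from a real project.

```javascript
// Hypothetical tasks.json entry illustrating the fields described above.
const exampleTask = {
  id: 3,
  title: "Add GitHub OAuth login",
  description: "Let users sign in with their GitHub account.",
  status: "pending",
  dependencies: [1, 2], // tasks 1 and 2 must be completed first
  priority: "high",
  details: "Use GitHub client ID/secret, handle the callback route, set a session token.",
  testStrategy: "Log in with a test GitHub account and confirm a session cookie is issued.",
  subtasks: [
    { id: 1, title: "Configure OAuth app and secrets", status: "pending", dependencies: [] }
  ]
};
```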
## Task File Format

Individual task files follow this format:

```
# Task ID: <id>
# Title: <title>
# Status: <status>
# Dependencies: <comma-separated list of dependency IDs>
# Priority: <priority>
# Description: <brief description>
# Details:
<detailed implementation notes>

# Test Strategy:
<verification approach>
```

## Features in Detail

### Analyzing Task Complexity

The `analyze-complexity` command:

- Analyzes each task using AI to assess its complexity on a scale of 1-10
- Recommends optimal number of subtasks based on configured DEFAULT_SUBTASKS
- Generates tailored prompts for expanding each task
- Creates a comprehensive JSON report with ready-to-use commands
- Saves the report to scripts/task-complexity-report.json by default

The generated report contains:

- Complexity analysis for each task (scored 1-10)
- Recommended number of subtasks based on complexity
- AI-generated expansion prompts customized for each task
- Ready-to-run expansion commands directly within each task analysis
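To make the report's shape concrete, one entry might look roughly like the sketch below. The field names are illustrative assumptions, not the exact schema written by `analyze-complexity`.

```javascript
// Hypothetical shape of one entry in scripts/task-complexity-report.json.
const exampleReportEntry = {
  taskId: 8,
  taskTitle: "Implement role-based access control",
  complexityScore: 9, // 1-10, as assessed by the AI
  recommendedSubtasks: 5, // informed by the score and DEFAULT_SUBTASKS
  expansionPrompt: "Break RBAC into schema changes, middleware, admin UI, tests, and docs.",
  expansionCommand: "task-master expand --id=8 --num=5"
};
```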
### Viewing Complexity Report

The `complexity-report` command:

- Displays a formatted, easy-to-read version of the complexity analysis report
- Shows tasks organized by complexity score (highest to lowest)
- Provides complexity distribution statistics (low, medium, high)
- Highlights tasks recommended for expansion based on threshold score
- Includes ready-to-use expansion commands for each complex task
- If no report exists, offers to generate one on the spot

### Smart Task Expansion

The `expand` command automatically checks for and uses the complexity report:

When a complexity report exists:

- Tasks are automatically expanded using the recommended subtask count and prompts
- When expanding all tasks, they're processed in order of complexity (highest first)
- Research-backed generation is preserved from the complexity analysis
- You can still override recommendations with explicit command-line options

Example workflow:

```bash
# Generate the complexity analysis report with research capabilities
task-master analyze-complexity --research

# Review the report in a readable format
task-master complexity-report

# Expand tasks using the optimized recommendations
task-master expand --id=8
# or expand all tasks
task-master expand --all
```

### Finding the Next Task

The `next` command:

- Identifies tasks that are pending/in-progress and have all dependencies satisfied
- Prioritizes tasks by priority level, dependency count, and task ID
- Displays comprehensive information about the selected task:
  - Basic task details (ID, title, priority, dependencies)
  - Implementation details
  - Subtasks (if they exist)
- Provides contextual suggested actions:
  - Command to mark the task as in-progress
  - Command to mark the task as done
  - Commands for working with subtasks
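The selection rule described above can be pictured with a small sketch. This is an illustration of the documented ordering only, not the project's actual implementation.

```javascript
// Minimal sketch: pick a pending/in-progress task whose dependencies are all done,
// ordered by priority, then dependency count, then ID.
const priorityRank = { high: 0, medium: 1, low: 2 };

function findNextTask(tasks) {
  const doneIds = new Set(tasks.filter(t => t.status === 'done').map(t => t.id));
  const ready = tasks.filter(
    t =>
      (t.status === 'pending' || t.status === 'in-progress') &&
      (t.dependencies || []).every(depId => doneIds.has(depId))
  );
  ready.sort(
    (a, b) =>
      (priorityRank[a.priority] ?? 1) - (priorityRank[b.priority] ?? 1) ||
      (a.dependencies || []).length - (b.dependencies || []).length ||
      a.id - b.id
  );
  return ready[0]; // undefined when nothing is ready to work on
}
```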
### Viewing Specific Task Details

The `show` command:

- Displays comprehensive details about a specific task or subtask
- Shows task status, priority, dependencies, and detailed implementation notes
- For parent tasks, displays all subtasks and their status
- For subtasks, shows parent task relationship
- Provides contextual action suggestions based on the task's state
- Works with both regular tasks and subtasks (using the format taskId.subtaskId)

## Best Practices for AI-Driven Development

1. **Start with a detailed PRD**: The more detailed your PRD, the better the generated tasks will be.

2. **Review generated tasks**: After parsing the PRD, review the tasks to ensure they make sense and have appropriate dependencies.

3. **Analyze task complexity**: Use the complexity analysis feature to identify which tasks should be broken down further.

4. **Follow the dependency chain**: Always respect task dependencies - the Cursor agent will help with this.

5. **Update as you go**: If your implementation diverges from the plan, use the update command to keep future tasks aligned with your current approach.

6. **Break down complex tasks**: Use the expand command to break down complex tasks into manageable subtasks.

7. **Regenerate task files**: After any updates to tasks.json, regenerate the task files to keep them in sync.

8. **Communicate context to the agent**: When asking the Cursor agent to help with a task, provide context about what you're trying to achieve.

9. **Validate dependencies**: Periodically run the validate-dependencies command to check for invalid or circular dependencies.
355 docs/tutorial.md
@@ -1,355 +0,0 @@
# Task Master Tutorial

This tutorial will guide you through setting up and using Task Master for AI-driven development.

## Initial Setup

There are two ways to set up Task Master: using MCP (recommended) or via npm installation.

### Option 1: Using MCP (Recommended)

MCP (Model Context Protocol) provides the easiest way to get started with Task Master directly in your editor.

1. **Add the MCP config to your editor** (Cursor recommended, but it works with other text editors):

```json
{
  "mcpServers": {
    "taskmaster-ai": {
      "command": "npx",
      "args": ["-y", "task-master-ai"],
      "env": {
        "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
        "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
        "MODEL": "claude-3-7-sonnet-20250219",
        "PERPLEXITY_MODEL": "sonar-pro",
        "MAX_TOKENS": 64000,
        "TEMPERATURE": 0.2,
        "DEFAULT_SUBTASKS": 5,
        "DEFAULT_PRIORITY": "medium"
      }
    }
  }
}
```

2. **Enable the MCP** in your editor settings

3. **Prompt the AI** to initialize Task Master:

```
Can you please initialize taskmaster-ai into my project?
```

The AI will:

- Create necessary project structure
- Set up initial configuration files
- Guide you through the rest of the process

4. Place your PRD document in the `scripts/` directory (e.g., `scripts/prd.txt`)

5. **Use natural language commands** to interact with Task Master:

```
Can you parse my PRD at scripts/prd.txt?
What's the next task I should work on?
Can you help me implement task 3?
```

### Option 2: Manual Installation

If you prefer to use the command line interface directly:

```bash
# Install globally
npm install -g task-master-ai

# OR install locally within your project
npm install task-master-ai
```

Initialize a new project:

```bash
# If installed globally
task-master init

# If installed locally
npx task-master-init
```

This will prompt you for project details and set up a new project with the necessary files and structure.
## Common Commands

After setting up Task Master, you can use these commands (either via AI prompts or CLI):

```bash
# Parse a PRD and generate tasks
task-master parse-prd your-prd.txt

# List all tasks
task-master list

# Show the next task to work on
task-master next

# Generate task files
task-master generate
```

## Setting up Cursor AI Integration

Task Master is designed to work seamlessly with [Cursor AI](https://www.cursor.so/), providing a structured workflow for AI-driven development.

### Using Cursor with MCP (Recommended)

If you've already set up Task Master with MCP in Cursor, the integration is automatic. You can simply use natural language to interact with Task Master:

```
What tasks are available to work on next?
Can you analyze the complexity of our tasks?
I'd like to implement task 4. What does it involve?
```

### Manual Cursor Setup

If you're not using MCP, you can still set up Cursor integration:

1. After initializing your project, open it in Cursor
2. The `.cursor/rules/dev_workflow.mdc` file is automatically loaded by Cursor, providing the AI with knowledge about the task management system
3. Place your PRD document in the `scripts/` directory (e.g., `scripts/prd.txt`)
4. Open Cursor's AI chat and switch to Agent mode

### Alternative MCP Setup in Cursor

You can also set up the MCP server in Cursor settings:

1. Go to Cursor settings
2. Navigate to the MCP section
3. Click on "Add New MCP Server"
4. Configure with the following details:
   - Name: "Task Master"
   - Type: "Command"
   - Command: "npx -y task-master-mcp"
5. Save the settings

Once configured, you can interact with Task Master's task management commands directly through Cursor's interface, providing a more integrated experience.

## Initial Task Generation

In Cursor's AI chat, instruct the agent to generate tasks from your PRD:

```
Please use the task-master parse-prd command to generate tasks from my PRD. The PRD is located at scripts/prd.txt.
```

The agent will execute:

```bash
task-master parse-prd scripts/prd.txt
```
This will:

- Parse your PRD document
- Generate a structured `tasks.json` file with tasks, dependencies, priorities, and test strategies
- The agent will understand this process due to the Cursor rules

### Generate Individual Task Files

Next, ask the agent to generate individual task files:

```
Please generate individual task files from tasks.json
```

The agent will execute:

```bash
task-master generate
```

This creates individual task files in the `tasks/` directory (e.g., `task_001.txt`, `task_002.txt`), making it easier to reference specific tasks.

## AI-Driven Development Workflow

The Cursor agent is pre-configured (via the rules file) to follow this workflow:

### 1. Task Discovery and Selection

Ask the agent to list available tasks:

```
What tasks are available to work on next?
```

The agent will:

- Run `task-master list` to see all tasks
- Run `task-master next` to determine the next task to work on
- Analyze dependencies to determine which tasks are ready to be worked on
- Prioritize tasks based on priority level and ID order
- Suggest the next task(s) to implement

### 2. Task Implementation

When implementing a task, the agent will:

- Reference the task's details section for implementation specifics
- Consider dependencies on previous tasks
- Follow the project's coding standards
- Create appropriate tests based on the task's testStrategy

You can ask:

```
Let's implement task 3. What does it involve?
```

### 3. Task Verification

Before marking a task as complete, verify it according to:

- The task's specified testStrategy
- Any automated tests in the codebase
- Manual verification if required

### 4. Task Completion

When a task is completed, tell the agent:

```
Task 3 is now complete. Please update its status.
```

The agent will execute:

```bash
task-master set-status --id=3 --status=done
```

### 5. Handling Implementation Drift

If, during implementation, you discover that:

- The current approach differs significantly from what was planned
- Future tasks need to be modified due to current implementation choices
- New dependencies or requirements have emerged

Tell the agent:

```
We've changed our approach. We're now using Express instead of Fastify. Please update all future tasks to reflect this change.
```

The agent will execute:

```bash
task-master update --from=4 --prompt="Now we are using Express instead of Fastify."
```

This will rewrite or re-scope subsequent tasks in tasks.json while preserving completed work.
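A reasonable mental model of that scoping, sketched under the assumption that `update` only touches tasks at or after `--from` that are not already done (an illustration, not the project's actual code):

```javascript
// Assumed scoping rule for `task-master update --from=<id>`:
// completed work and earlier tasks are left untouched.
function selectTasksToUpdate(tasks, fromId) {
  return tasks.filter(t => t.id >= fromId && t.status !== 'done');
}

// e.g. selectTasksToUpdate(allTasks, 4) leaves tasks 1-3 and any finished tasks as-is.
```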
### 6. Breaking Down Complex Tasks

For complex tasks that need more granularity:

```
Task 5 seems complex. Can you break it down into subtasks?
```

The agent will execute:

```bash
task-master expand --id=5 --num=3
```

You can provide additional context:

```
Please break down task 5 with a focus on security considerations.
```

The agent will execute:

```bash
task-master expand --id=5 --prompt="Focus on security aspects"
```

You can also expand all pending tasks:

```
Please break down all pending tasks into subtasks.
```

The agent will execute:

```bash
task-master expand --all
```

For research-backed subtask generation using Perplexity AI:

```
Please break down task 5 using research-backed generation.
```

The agent will execute:

```bash
task-master expand --id=5 --research
```

## Example Cursor AI Interactions

### Starting a new project

```
I've just initialized a new project with Claude Task Master. I have a PRD at scripts/prd.txt.
Can you help me parse it and set up the initial tasks?
```

### Working on tasks

```
What's the next task I should work on? Please consider dependencies and priorities.
```

### Implementing a specific task

```
I'd like to implement task 4. Can you help me understand what needs to be done and how to approach it?
```

### Managing subtasks

```
I need to regenerate the subtasks for task 3 with a different approach. Can you help me clear and regenerate them?
```

### Handling changes

```
We've decided to use MongoDB instead of PostgreSQL. Can you update all future tasks to reflect this change?
```

### Completing work

```
I've finished implementing the authentication system described in task 2. All tests are passing.
Please mark it as complete and tell me what I should work on next.
```

### Analyzing complexity

```
Can you analyze the complexity of our tasks to help me understand which ones need to be broken down further?
```

### Viewing complexity report

```
Can you show me the complexity report in a more readable format?
```
41 entries.json Normal file
@@ -0,0 +1,41 @@
import os
import json

# Path to Cursor's history folder
history_path = os.path.expanduser('~/Library/Application Support/Cursor/User/History')

# File to search for
target_file = 'tasks/tasks.json'

# Function to search through all entries.json files
def search_entries_for_file(history_path, target_file):
    matching_folders = []
    for folder in os.listdir(history_path):
        folder_path = os.path.join(history_path, folder)
        if not os.path.isdir(folder_path):
            continue

        # Look for entries.json
        entries_file = os.path.join(folder_path, 'entries.json')
        if not os.path.exists(entries_file):
            continue

        # Parse entries.json to find the resource key
        with open(entries_file, 'r') as f:
            data = json.load(f)
            resource = data.get('resource', None)
            if resource and target_file in resource:
                matching_folders.append(folder_path)

    return matching_folders

# Search for the target file
matching_folders = search_entries_for_file(history_path, target_file)

# Output the matching folders
if matching_folders:
    print(f"Found {target_file} in the following folders:")
    for folder in matching_folders:
        print(folder)
else:
    print(f"No matches found for {target_file}.")
198 index.js
@@ -41,23 +41,27 @@ export const devScriptPath = resolve(__dirname, './scripts/dev.js');
|
|||||||
|
|
||||||
// Export a function to initialize a new project programmatically
|
// Export a function to initialize a new project programmatically
|
||||||
export const initProject = async (options = {}) => {
|
export const initProject = async (options = {}) => {
|
||||||
const init = await import('./scripts/init.js');
|
const init = await import('./scripts/init.js');
|
||||||
return init.initializeProject(options);
|
return init.initializeProject(options);
|
||||||
};
|
};
|
||||||
|
|
||||||
// Export a function to run init as a CLI command
|
// Export a function to run init as a CLI command
|
||||||
export const runInitCLI = async (options = {}) => {
|
export const runInitCLI = async () => {
|
||||||
try {
|
// Using spawn to ensure proper handling of stdio and process exit
|
||||||
const init = await import('./scripts/init.js');
|
const child = spawn('node', [resolve(__dirname, './scripts/init.js')], {
|
||||||
const result = await init.initializeProject(options);
|
stdio: 'inherit',
|
||||||
return result;
|
cwd: process.cwd()
|
||||||
} catch (error) {
|
});
|
||||||
console.error('Initialization failed:', error.message);
|
|
||||||
if (process.env.DEBUG === 'true') {
|
return new Promise((resolve, reject) => {
|
||||||
console.error('Debug stack trace:', error.stack);
|
child.on('close', (code) => {
|
||||||
}
|
if (code === 0) {
|
||||||
throw error; // Re-throw to be handled by the command handler
|
resolve();
|
||||||
}
|
} else {
|
||||||
|
reject(new Error(`Init script exited with code ${code}`));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
};
|
};
|
||||||
|
|
||||||
// Export version information
|
// Export version information
|
||||||
@@ -65,91 +69,81 @@ export const version = packageJson.version;
|
|||||||
|
|
||||||
// CLI implementation
|
// CLI implementation
|
||||||
if (import.meta.url === `file://${process.argv[1]}`) {
|
if (import.meta.url === `file://${process.argv[1]}`) {
|
||||||
const program = new Command();
|
const program = new Command();
|
||||||
|
|
||||||
program
|
program
|
||||||
.name('task-master')
|
.name('task-master')
|
||||||
.description('Claude Task Master CLI')
|
.description('Claude Task Master CLI')
|
||||||
.version(version);
|
.version(version);
|
||||||
|
|
||||||
program
|
program
|
||||||
.command('init')
|
.command('init')
|
||||||
.description('Initialize a new project')
|
.description('Initialize a new project')
|
||||||
.option('-y, --yes', 'Skip prompts and use default values')
|
.action(() => {
|
||||||
.option('-n, --name <n>', 'Project name')
|
runInitCLI().catch(err => {
|
||||||
.option('-d, --description <description>', 'Project description')
|
console.error('Init failed:', err.message);
|
||||||
.option('-v, --version <version>', 'Project version', '0.1.0')
|
process.exit(1);
|
||||||
.option('-a, --author <author>', 'Author name')
|
});
|
||||||
.option('--skip-install', 'Skip installing dependencies')
|
});
|
||||||
.option('--dry-run', 'Show what would be done without making changes')
|
|
||||||
.option('--aliases', 'Add shell aliases (tm, taskmaster)')
|
program
|
||||||
.action(async (cmdOptions) => {
|
.command('dev')
|
||||||
try {
|
.description('Run the dev.js script')
|
||||||
await runInitCLI(cmdOptions);
|
.allowUnknownOption(true)
|
||||||
} catch (err) {
|
.action(() => {
|
||||||
console.error('Init failed:', err.message);
|
const args = process.argv.slice(process.argv.indexOf('dev') + 1);
|
||||||
process.exit(1);
|
const child = spawn('node', [devScriptPath, ...args], {
|
||||||
}
|
stdio: 'inherit',
|
||||||
});
|
cwd: process.cwd()
|
||||||
|
});
|
||||||
program
|
|
||||||
.command('dev')
|
child.on('close', (code) => {
|
||||||
.description('Run the dev.js script')
|
process.exit(code);
|
||||||
.allowUnknownOption(true)
|
});
|
||||||
.action(() => {
|
});
|
||||||
const args = process.argv.slice(process.argv.indexOf('dev') + 1);
|
|
||||||
const child = spawn('node', [devScriptPath, ...args], {
|
// Add shortcuts for common dev.js commands
|
||||||
stdio: 'inherit',
|
program
|
||||||
cwd: process.cwd()
|
.command('list')
|
||||||
});
|
.description('List all tasks')
|
||||||
|
.action(() => {
|
||||||
child.on('close', (code) => {
|
const child = spawn('node', [devScriptPath, 'list'], {
|
||||||
process.exit(code);
|
stdio: 'inherit',
|
||||||
});
|
cwd: process.cwd()
|
||||||
});
|
});
|
||||||
|
|
||||||
// Add shortcuts for common dev.js commands
|
child.on('close', (code) => {
|
||||||
program
|
process.exit(code);
|
||||||
.command('list')
|
});
|
||||||
.description('List all tasks')
|
});
|
||||||
.action(() => {
|
|
||||||
const child = spawn('node', [devScriptPath, 'list'], {
|
program
|
||||||
stdio: 'inherit',
|
.command('next')
|
||||||
cwd: process.cwd()
|
.description('Show the next task to work on')
|
||||||
});
|
.action(() => {
|
||||||
|
const child = spawn('node', [devScriptPath, 'next'], {
|
||||||
child.on('close', (code) => {
|
stdio: 'inherit',
|
||||||
process.exit(code);
|
cwd: process.cwd()
|
||||||
});
|
});
|
||||||
});
|
|
||||||
|
child.on('close', (code) => {
|
||||||
program
|
process.exit(code);
|
||||||
.command('next')
|
});
|
||||||
.description('Show the next task to work on')
|
});
|
||||||
.action(() => {
|
|
||||||
const child = spawn('node', [devScriptPath, 'next'], {
|
program
|
||||||
stdio: 'inherit',
|
.command('generate')
|
||||||
cwd: process.cwd()
|
.description('Generate task files')
|
||||||
});
|
.action(() => {
|
||||||
|
const child = spawn('node', [devScriptPath, 'generate'], {
|
||||||
child.on('close', (code) => {
|
stdio: 'inherit',
|
||||||
process.exit(code);
|
cwd: process.cwd()
|
||||||
});
|
});
|
||||||
});
|
|
||||||
|
child.on('close', (code) => {
|
||||||
program
|
process.exit(code);
|
||||||
.command('generate')
|
});
|
||||||
.description('Generate task files')
|
});
|
||||||
.action(() => {
|
|
||||||
const child = spawn('node', [devScriptPath, 'generate'], {
|
program.parse(process.argv);
|
||||||
stdio: 'inherit',
|
}
|
||||||
cwd: process.cwd()
|
|
||||||
});
|
|
||||||
|
|
||||||
child.on('close', (code) => {
|
|
||||||
process.exit(code);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
program.parse(process.argv);
|
|
||||||
}
|
|
||||||
110
jest.config.js
110
jest.config.js
@@ -1,56 +1,56 @@
|
|||||||
export default {
|
export default {
|
||||||
// Use Node.js environment for testing
|
// Use Node.js environment for testing
|
||||||
testEnvironment: 'node',
|
testEnvironment: 'node',
|
||||||
|
|
||||||
// Automatically clear mock calls between every test
|
// Automatically clear mock calls between every test
|
||||||
clearMocks: true,
|
clearMocks: true,
|
||||||
|
|
||||||
// Indicates whether the coverage information should be collected while executing the test
|
// Indicates whether the coverage information should be collected while executing the test
|
||||||
collectCoverage: false,
|
collectCoverage: false,
|
||||||
|
|
||||||
// The directory where Jest should output its coverage files
|
// The directory where Jest should output its coverage files
|
||||||
coverageDirectory: 'coverage',
|
coverageDirectory: 'coverage',
|
||||||
|
|
||||||
// A list of paths to directories that Jest should use to search for files in
|
// A list of paths to directories that Jest should use to search for files in
|
||||||
roots: ['<rootDir>/tests'],
|
roots: ['<rootDir>/tests'],
|
||||||
|
|
||||||
// The glob patterns Jest uses to detect test files
|
// The glob patterns Jest uses to detect test files
|
||||||
testMatch: [
|
testMatch: [
|
||||||
'**/__tests__/**/*.js',
|
'**/__tests__/**/*.js',
|
||||||
'**/?(*.)+(spec|test).js',
|
'**/?(*.)+(spec|test).js',
|
||||||
'**/tests/*.test.js'
|
'**/tests/*.test.js'
|
||||||
],
|
],
|
||||||
|
|
||||||
// Transform files
|
// Transform files
|
||||||
transform: {},
|
transform: {},
|
||||||
|
|
||||||
// Disable transformations for node_modules
|
// Disable transformations for node_modules
|
||||||
transformIgnorePatterns: ['/node_modules/'],
|
transformIgnorePatterns: ['/node_modules/'],
|
||||||
|
|
||||||
// Set moduleNameMapper for absolute paths
|
// Set moduleNameMapper for absolute paths
|
||||||
moduleNameMapper: {
|
moduleNameMapper: {
|
||||||
'^@/(.*)$': '<rootDir>/$1'
|
'^@/(.*)$': '<rootDir>/$1'
|
||||||
},
|
},
|
||||||
|
|
||||||
// Setup module aliases
|
// Setup module aliases
|
||||||
moduleDirectories: ['node_modules', '<rootDir>'],
|
moduleDirectories: ['node_modules', '<rootDir>'],
|
||||||
|
|
||||||
// Configure test coverage thresholds
|
// Configure test coverage thresholds
|
||||||
coverageThreshold: {
|
coverageThreshold: {
|
||||||
global: {
|
global: {
|
||||||
branches: 80,
|
branches: 80,
|
||||||
functions: 80,
|
functions: 80,
|
||||||
lines: 80,
|
lines: 80,
|
||||||
statements: 80
|
statements: 80
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
||||||
// Generate coverage report in these formats
|
// Generate coverage report in these formats
|
||||||
coverageReporters: ['text', 'lcov'],
|
coverageReporters: ['text', 'lcov'],
|
||||||
|
|
||||||
// Verbose output
|
// Verbose output
|
||||||
verbose: true,
|
verbose: true,
|
||||||
|
|
||||||
// Setup file
|
// Setup file
|
||||||
setupFilesAfterEnv: ['<rootDir>/tests/setup.js']
|
setupFilesAfterEnv: ['<rootDir>/tests/setup.js']
|
||||||
};
|
};
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
#!/usr/bin/env node
|
#!/usr/bin/env node
|
||||||
|
|
||||||
import TaskMasterMCPServer from './src/index.js';
|
import TaskMasterMCPServer from "./src/index.js";
|
||||||
import dotenv from 'dotenv';
|
import dotenv from "dotenv";
|
||||||
import logger from './src/logger.js';
|
import logger from "./src/logger.js";
|
||||||
|
|
||||||
// Load environment variables
|
// Load environment variables
|
||||||
dotenv.config();
|
dotenv.config();
|
||||||
@@ -11,25 +11,25 @@ dotenv.config();
|
|||||||
* Start the MCP server
|
* Start the MCP server
|
||||||
*/
|
*/
|
||||||
async function startServer() {
|
async function startServer() {
|
||||||
const server = new TaskMasterMCPServer();
|
const server = new TaskMasterMCPServer();
|
||||||
|
|
||||||
// Handle graceful shutdown
|
// Handle graceful shutdown
|
||||||
process.on('SIGINT', async () => {
|
process.on("SIGINT", async () => {
|
||||||
await server.stop();
|
await server.stop();
|
||||||
process.exit(0);
|
process.exit(0);
|
||||||
});
|
});
|
||||||
|
|
||||||
process.on('SIGTERM', async () => {
|
process.on("SIGTERM", async () => {
|
||||||
await server.stop();
|
await server.stop();
|
||||||
process.exit(0);
|
process.exit(0);
|
||||||
});
|
});
|
||||||
|
|
||||||
try {
|
try {
|
||||||
await server.start();
|
await server.start();
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error(`Failed to start MCP server: ${error.message}`);
|
logger.error(`Failed to start MCP server: ${error.message}`);
|
||||||
process.exit(1);
|
process.exit(1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start the server
|
// Start the server
|
||||||
|
|||||||
@@ -2,90 +2,84 @@ import { jest } from '@jest/globals';
|
|||||||
import { ContextManager } from '../context-manager.js';
|
import { ContextManager } from '../context-manager.js';
|
||||||
|
|
||||||
describe('ContextManager', () => {
|
describe('ContextManager', () => {
|
||||||
let contextManager;
|
let contextManager;
|
||||||
|
|
||||||
beforeEach(() => {
|
beforeEach(() => {
|
||||||
contextManager = new ContextManager({
|
contextManager = new ContextManager({
|
||||||
maxCacheSize: 10,
|
maxCacheSize: 10,
|
||||||
ttl: 1000, // 1 second for testing
|
ttl: 1000, // 1 second for testing
|
||||||
maxContextSize: 1000
|
maxContextSize: 1000
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('getContext', () => {
|
describe('getContext', () => {
|
||||||
it('should create a new context when not in cache', async () => {
|
it('should create a new context when not in cache', async () => {
|
||||||
const context = await contextManager.getContext('test-id', {
|
const context = await contextManager.getContext('test-id', { test: true });
|
||||||
test: true
|
expect(context.id).toBe('test-id');
|
||||||
});
|
expect(context.metadata.test).toBe(true);
|
||||||
expect(context.id).toBe('test-id');
|
expect(contextManager.stats.misses).toBe(1);
|
||||||
expect(context.metadata.test).toBe(true);
|
expect(contextManager.stats.hits).toBe(0);
|
||||||
expect(contextManager.stats.misses).toBe(1);
|
});
|
||||||
expect(contextManager.stats.hits).toBe(0);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return cached context when available', async () => {
|
it('should return cached context when available', async () => {
|
||||||
// First call creates the context
|
// First call creates the context
|
||||||
await contextManager.getContext('test-id', { test: true });
|
await contextManager.getContext('test-id', { test: true });
|
||||||
|
|
||||||
|
// Second call should hit cache
|
||||||
|
const context = await contextManager.getContext('test-id', { test: true });
|
||||||
|
expect(context.id).toBe('test-id');
|
||||||
|
expect(context.metadata.test).toBe(true);
|
||||||
|
expect(contextManager.stats.hits).toBe(1);
|
||||||
|
expect(contextManager.stats.misses).toBe(1);
|
||||||
|
});
|
||||||
|
|
||||||
// Second call should hit cache
|
it('should respect TTL settings', async () => {
|
||||||
const context = await contextManager.getContext('test-id', {
|
// Create context
|
||||||
test: true
|
await contextManager.getContext('test-id', { test: true });
|
||||||
});
|
|
||||||
expect(context.id).toBe('test-id');
|
// Wait for TTL to expire
|
||||||
expect(context.metadata.test).toBe(true);
|
await new Promise(resolve => setTimeout(resolve, 1100));
|
||||||
expect(contextManager.stats.hits).toBe(1);
|
|
||||||
expect(contextManager.stats.misses).toBe(1);
|
// Should create new context
|
||||||
});
|
await contextManager.getContext('test-id', { test: true });
|
||||||
|
expect(contextManager.stats.misses).toBe(2);
|
||||||
|
expect(contextManager.stats.hits).toBe(0);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
it('should respect TTL settings', async () => {
|
describe('updateContext', () => {
|
||||||
// Create context
|
it('should update existing context metadata', async () => {
|
||||||
await contextManager.getContext('test-id', { test: true });
|
await contextManager.getContext('test-id', { initial: true });
|
||||||
|
const updated = await contextManager.updateContext('test-id', { updated: true });
|
||||||
|
|
||||||
|
expect(updated.metadata.initial).toBe(true);
|
||||||
|
expect(updated.metadata.updated).toBe(true);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
// Wait for TTL to expire
|
describe('invalidateContext', () => {
|
||||||
await new Promise((resolve) => setTimeout(resolve, 1100));
|
it('should remove context from cache', async () => {
|
||||||
|
await contextManager.getContext('test-id', { test: true });
|
||||||
|
contextManager.invalidateContext('test-id', { test: true });
|
||||||
|
|
||||||
|
// Should be a cache miss
|
||||||
|
await contextManager.getContext('test-id', { test: true });
|
||||||
|
expect(contextManager.stats.invalidations).toBe(1);
|
||||||
|
expect(contextManager.stats.misses).toBe(2);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
// Should create new context
|
describe('getStats', () => {
|
||||||
await contextManager.getContext('test-id', { test: true });
|
it('should return current cache statistics', async () => {
|
||||||
expect(contextManager.stats.misses).toBe(2);
|
await contextManager.getContext('test-id', { test: true });
|
||||||
expect(contextManager.stats.hits).toBe(0);
|
const stats = contextManager.getStats();
|
||||||
});
|
|
||||||
});
|
expect(stats.hits).toBe(0);
|
||||||
|
expect(stats.misses).toBe(1);
|
||||||
describe('updateContext', () => {
|
expect(stats.invalidations).toBe(0);
|
||||||
it('should update existing context metadata', async () => {
|
expect(stats.size).toBe(1);
|
||||||
await contextManager.getContext('test-id', { initial: true });
|
expect(stats.maxSize).toBe(10);
|
||||||
const updated = await contextManager.updateContext('test-id', {
|
expect(stats.ttl).toBe(1000);
|
||||||
updated: true
|
});
|
||||||
});
|
});
|
||||||
|
});
|
||||||
expect(updated.metadata.initial).toBe(true);
|
|
||||||
expect(updated.metadata.updated).toBe(true);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('invalidateContext', () => {
|
|
||||||
it('should remove context from cache', async () => {
|
|
||||||
await contextManager.getContext('test-id', { test: true });
|
|
||||||
contextManager.invalidateContext('test-id', { test: true });
|
|
||||||
|
|
||||||
// Should be a cache miss
|
|
||||||
await contextManager.getContext('test-id', { test: true });
|
|
||||||
expect(contextManager.stats.invalidations).toBe(1);
|
|
||||||
expect(contextManager.stats.misses).toBe(2);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('getStats', () => {
|
|
||||||
it('should return current cache statistics', async () => {
|
|
||||||
await contextManager.getContext('test-id', { test: true });
|
|
||||||
const stats = contextManager.getStats();
|
|
||||||
|
|
||||||
expect(stats.hits).toBe(0);
|
|
||||||
expect(stats.misses).toBe(1);
|
|
||||||
expect(stats.invalidations).toBe(0);
|
|
||||||
expect(stats.size).toBe(1);
|
|
||||||
expect(stats.maxSize).toBe(10);
|
|
||||||
expect(stats.ttl).toBe(1000);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
@@ -15,157 +15,156 @@ import { LRUCache } from 'lru-cache';
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
export class ContextManager {
|
export class ContextManager {
|
||||||
/**
|
/**
|
||||||
* Create a new ContextManager instance
|
* Create a new ContextManager instance
|
||||||
* @param {ContextManagerConfig} config - Configuration options
|
* @param {ContextManagerConfig} config - Configuration options
|
||||||
*/
|
*/
|
||||||
constructor(config = {}) {
|
constructor(config = {}) {
|
||||||
this.config = {
|
this.config = {
|
||||||
maxCacheSize: config.maxCacheSize || 1000,
|
maxCacheSize: config.maxCacheSize || 1000,
|
||||||
ttl: config.ttl || 1000 * 60 * 5, // 5 minutes default
|
ttl: config.ttl || 1000 * 60 * 5, // 5 minutes default
|
||||||
maxContextSize: config.maxContextSize || 4000
|
maxContextSize: config.maxContextSize || 4000
|
||||||
};
|
};
|
||||||
|
|
||||||
// Initialize LRU cache for context data
|
// Initialize LRU cache for context data
|
||||||
this.cache = new LRUCache({
|
this.cache = new LRUCache({
|
||||||
max: this.config.maxCacheSize,
|
max: this.config.maxCacheSize,
|
||||||
ttl: this.config.ttl,
|
ttl: this.config.ttl,
|
||||||
updateAgeOnGet: true
|
updateAgeOnGet: true
|
||||||
});
|
});
|
||||||
|
|
||||||
// Cache statistics
|
// Cache statistics
|
||||||
this.stats = {
|
this.stats = {
|
||||||
hits: 0,
|
hits: 0,
|
||||||
misses: 0,
|
misses: 0,
|
||||||
invalidations: 0
|
invalidations: 0
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new context or retrieve from cache
|
* Create a new context or retrieve from cache
|
||||||
* @param {string} contextId - Unique identifier for the context
|
* @param {string} contextId - Unique identifier for the context
|
||||||
* @param {Object} metadata - Additional metadata for the context
|
* @param {Object} metadata - Additional metadata for the context
|
||||||
* @returns {Object} Context object with metadata
|
* @returns {Object} Context object with metadata
|
||||||
*/
|
*/
|
||||||
async getContext(contextId, metadata = {}) {
|
async getContext(contextId, metadata = {}) {
|
||||||
const cacheKey = this._getCacheKey(contextId, metadata);
|
const cacheKey = this._getCacheKey(contextId, metadata);
|
||||||
|
|
||||||
|
// Try to get from cache first
|
||||||
|
const cached = this.cache.get(cacheKey);
|
||||||
|
if (cached) {
|
||||||
|
this.stats.hits++;
|
||||||
|
return cached;
|
||||||
|
}
|
||||||
|
|
||||||
// Try to get from cache first
|
this.stats.misses++;
|
||||||
const cached = this.cache.get(cacheKey);
|
|
||||||
if (cached) {
|
// Create new context if not in cache
|
||||||
this.stats.hits++;
|
const context = {
|
||||||
return cached;
|
id: contextId,
|
||||||
}
|
metadata: {
|
||||||
|
...metadata,
|
||||||
|
created: new Date().toISOString()
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
this.stats.misses++;
|
// Cache the new context
|
||||||
|
this.cache.set(cacheKey, context);
|
||||||
|
|
||||||
|
return context;
|
||||||
|
}
|
||||||
|
|
||||||
// Create new context if not in cache
|
/**
|
||||||
const context = {
|
* Update an existing context
|
||||||
id: contextId,
|
* @param {string} contextId - Context identifier
|
||||||
metadata: {
|
* @param {Object} updates - Updates to apply to the context
|
||||||
...metadata,
|
* @returns {Object} Updated context
|
||||||
created: new Date().toISOString()
|
*/
|
||||||
}
|
async updateContext(contextId, updates) {
|
||||||
};
|
const context = await this.getContext(contextId);
|
||||||
|
|
||||||
|
// Apply updates to context
|
||||||
|
Object.assign(context.metadata, updates);
|
||||||
|
|
||||||
|
// Update cache
|
||||||
|
const cacheKey = this._getCacheKey(contextId, context.metadata);
|
||||||
|
this.cache.set(cacheKey, context);
|
||||||
|
|
||||||
|
return context;
|
||||||
|
}
|
||||||
|
|
||||||
// Cache the new context
|
/**
|
||||||
this.cache.set(cacheKey, context);
|
* Invalidate a context in the cache
|
||||||
|
* @param {string} contextId - Context identifier
|
||||||
|
* @param {Object} metadata - Metadata used in the cache key
|
||||||
|
*/
|
||||||
|
invalidateContext(contextId, metadata = {}) {
|
||||||
|
const cacheKey = this._getCacheKey(contextId, metadata);
|
||||||
|
this.cache.delete(cacheKey);
|
||||||
|
this.stats.invalidations++;
|
||||||
|
}
|
||||||
|
|
||||||
return context;
|
/**
|
||||||
}
|
* Get cached data associated with a specific key.
|
||||||
|
* Increments cache hit stats if found.
|
||||||
|
* @param {string} key - The cache key.
|
||||||
|
* @returns {any | undefined} The cached data or undefined if not found/expired.
|
||||||
|
*/
|
||||||
|
getCachedData(key) {
|
||||||
|
const cached = this.cache.get(key);
|
||||||
|
if (cached !== undefined) { // Check for undefined specifically, as null/false might be valid cached values
|
||||||
|
this.stats.hits++;
|
||||||
|
return cached;
|
||||||
|
}
|
||||||
|
this.stats.misses++;
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Update an existing context
|
* Set data in the cache with a specific key.
|
||||||
* @param {string} contextId - Context identifier
|
* @param {string} key - The cache key.
|
||||||
* @param {Object} updates - Updates to apply to the context
|
* @param {any} data - The data to cache.
|
||||||
* @returns {Object} Updated context
|
*/
|
||||||
*/
|
setCachedData(key, data) {
|
||||||
async updateContext(contextId, updates) {
|
this.cache.set(key, data);
|
||||||
const context = await this.getContext(contextId);
|
}
|
||||||
|
|
||||||
// Apply updates to context
|
/**
|
||||||
Object.assign(context.metadata, updates);
|
* Invalidate a specific cache key.
|
||||||
|
* Increments invalidation stats.
|
||||||
|
* @param {string} key - The cache key to invalidate.
|
||||||
|
*/
|
||||||
|
invalidateCacheKey(key) {
|
||||||
|
this.cache.delete(key);
|
||||||
|
this.stats.invalidations++;
|
||||||
|
}
|
||||||
|
|
||||||
// Update cache
|
/**
|
||||||
const cacheKey = this._getCacheKey(contextId, context.metadata);
|
* Get cache statistics
|
||||||
this.cache.set(cacheKey, context);
|
* @returns {Object} Cache statistics
|
||||||
|
*/
|
||||||
|
getStats() {
|
||||||
|
return {
|
||||||
|
hits: this.stats.hits,
|
||||||
|
misses: this.stats.misses,
|
||||||
|
invalidations: this.stats.invalidations,
|
||||||
|
size: this.cache.size,
|
||||||
|
maxSize: this.config.maxCacheSize,
|
||||||
|
ttl: this.config.ttl
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
return context;
|
/**
|
||||||
}
|
* Generate a cache key from context ID and metadata
|
||||||
|
* @private
|
||||||
/**
|
* @deprecated No longer used for direct cache key generation outside the manager.
|
||||||
* Invalidate a context in the cache
|
* Prefer generating specific keys in calling functions.
|
||||||
* @param {string} contextId - Context identifier
|
*/
|
||||||
* @param {Object} metadata - Metadata used in the cache key
|
_getCacheKey(contextId, metadata) {
|
||||||
*/
|
// Kept for potential backward compatibility or internal use if needed later.
|
||||||
invalidateContext(contextId, metadata = {}) {
|
return `${contextId}:${JSON.stringify(metadata)}`;
|
||||||
const cacheKey = this._getCacheKey(contextId, metadata);
|
}
|
||||||
this.cache.delete(cacheKey);
|
|
||||||
this.stats.invalidations++;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get cached data associated with a specific key.
|
|
||||||
* Increments cache hit stats if found.
|
|
||||||
* @param {string} key - The cache key.
|
|
||||||
* @returns {any | undefined} The cached data or undefined if not found/expired.
|
|
||||||
*/
|
|
||||||
getCachedData(key) {
|
|
||||||
const cached = this.cache.get(key);
|
|
||||||
if (cached !== undefined) {
|
|
||||||
// Check for undefined specifically, as null/false might be valid cached values
|
|
||||||
this.stats.hits++;
|
|
||||||
return cached;
|
|
||||||
}
|
|
||||||
this.stats.misses++;
|
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Set data in the cache with a specific key.
|
|
||||||
* @param {string} key - The cache key.
|
|
||||||
* @param {any} data - The data to cache.
|
|
||||||
*/
|
|
||||||
setCachedData(key, data) {
|
|
||||||
this.cache.set(key, data);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Invalidate a specific cache key.
|
|
||||||
* Increments invalidation stats.
|
|
||||||
* @param {string} key - The cache key to invalidate.
|
|
||||||
*/
|
|
||||||
invalidateCacheKey(key) {
|
|
||||||
this.cache.delete(key);
|
|
||||||
this.stats.invalidations++;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get cache statistics
|
|
||||||
* @returns {Object} Cache statistics
|
|
||||||
*/
|
|
||||||
getStats() {
|
|
||||||
return {
|
|
||||||
hits: this.stats.hits,
|
|
||||||
misses: this.stats.misses,
|
|
||||||
invalidations: this.stats.invalidations,
|
|
||||||
size: this.cache.size,
|
|
||||||
maxSize: this.config.maxCacheSize,
|
|
||||||
ttl: this.config.ttl
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Generate a cache key from context ID and metadata
|
|
||||||
* @private
|
|
||||||
* @deprecated No longer used for direct cache key generation outside the manager.
|
|
||||||
* Prefer generating specific keys in calling functions.
|
|
||||||
*/
|
|
||||||
_getCacheKey(contextId, metadata) {
|
|
||||||
// Kept for potential backward compatibility or internal use if needed later.
|
|
||||||
return `${contextId}:${JSON.stringify(metadata)}`;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Export a singleton instance with default config
|
// Export a singleton instance with default config
|
||||||
export const contextManager = new ContextManager();
|
export const contextManager = new ContextManager();
|
||||||
@@ -4,103 +4,82 @@
 */

import { addDependency } from '../../../../scripts/modules/dependency-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for addDependency with error handling.
 *
 * @param {Object} args - Command arguments
 * @param {string|number} args.id - Task ID to add dependency to
 * @param {string|number} args.dependsOn - Task ID that will become a dependency
 * @param {string} [args.file] - Path to the tasks file
 * @param {string} [args.projectRoot] - Project root directory
 * @param {Object} log - Logger object
 * @returns {Promise<Object>} - Result object with success status and data/error information
 */
export async function addDependencyDirect(args, log) {
	try {
		log.info(`Adding dependency with args: ${JSON.stringify(args)}`);

		// Validate required parameters
		if (!args.id) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Task ID (id) is required'
				}
			};
		}

		if (!args.dependsOn) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Dependency ID (dependsOn) is required'
				}
			};
		}

		// Find the tasks.json path
		const tasksPath = findTasksJsonPath(args, log);

		// Format IDs for the core function
		const taskId = args.id.includes && args.id.includes('.') ? args.id : parseInt(args.id, 10);
		const dependencyId = args.dependsOn.includes && args.dependsOn.includes('.') ? args.dependsOn : parseInt(args.dependsOn, 10);

		log.info(`Adding dependency: task ${taskId} will depend on ${dependencyId}`);

		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Call the core function
		await addDependency(tasksPath, taskId, dependencyId);

		// Restore normal logging
		disableSilentMode();

		return {
			success: true,
			data: {
				message: `Successfully added dependency: Task ${taskId} now depends on ${dependencyId}`,
				taskId: taskId,
				dependencyId: dependencyId
			}
		};
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error in addDependencyDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message
			}
		};
	}
}
@@ -3,15 +3,12 @@
 */

import { addSubtask } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';

/**
 * Add a subtask to an existing task
 * @param {Object} args - Function arguments
 * @param {string} args.id - Parent task ID
 * @param {string} [args.taskId] - Existing task ID to convert to subtask (optional)
 * @param {string} [args.title] - Title for new subtask (when creating a new subtask)
@@ -19,147 +16,113 @@ import {
 * @param {string} [args.details] - Implementation details for new subtask
 * @param {string} [args.status] - Status for new subtask (default: 'pending')
 * @param {string} [args.dependencies] - Comma-separated list of dependency IDs
 * @param {string} [args.file] - Path to the tasks file
 * @param {boolean} [args.skipGenerate] - Skip regenerating task files
 * @param {string} [args.projectRoot] - Project root directory
 * @param {Object} log - Logger object
 * @returns {Promise<{success: boolean, data?: Object, error?: string}>}
 */
export async function addSubtaskDirect(args, log) {
	try {
		log.info(`Adding subtask with args: ${JSON.stringify(args)}`);

		if (!args.id) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Parent task ID is required'
				}
			};
		}

		// Either taskId or title must be provided
		if (!args.taskId && !args.title) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Either taskId or title must be provided'
				}
			};
		}

		// Find the tasks.json path
		const tasksPath = findTasksJsonPath(args, log);

		// Parse dependencies if provided
		let dependencies = [];
		if (args.dependencies) {
			dependencies = args.dependencies.split(',').map(id => {
				// Handle both regular IDs and dot notation
				return id.includes('.') ? id.trim() : parseInt(id.trim(), 10);
			});
		}

		// Convert existingTaskId to a number if provided
		const existingTaskId = args.taskId ? parseInt(args.taskId, 10) : null;

		// Convert parent ID to a number
		const parentId = parseInt(args.id, 10);

		// Determine if we should generate files
		const generateFiles = !args.skipGenerate;

		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Case 1: Convert existing task to subtask
		if (existingTaskId) {
			log.info(`Converting task ${existingTaskId} to a subtask of ${parentId}`);
			const result = await addSubtask(tasksPath, parentId, existingTaskId, null, generateFiles);

			// Restore normal logging
			disableSilentMode();

			return {
				success: true,
				data: {
					message: `Task ${existingTaskId} successfully converted to a subtask of task ${parentId}`,
					subtask: result
				}
			};
		}
		// Case 2: Create new subtask
		else {
			log.info(`Creating new subtask for parent task ${parentId}`);

			const newSubtaskData = {
				title: args.title,
				description: args.description || '',
				details: args.details || '',
				status: args.status || 'pending',
				dependencies: dependencies
			};

			const result = await addSubtask(tasksPath, parentId, null, newSubtaskData, generateFiles);

			// Restore normal logging
			disableSilentMode();

			return {
				success: true,
				data: {
					message: `New subtask ${parentId}.${result.id} successfully created`,
					subtask: result
				}
			};
		}
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error in addSubtaskDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message
			}
		};
	}
}
@@ -4,256 +4,81 @@
 */

import { addTask } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for adding a new task with error handling.
 *
 * @param {Object} args - Command arguments
 * @param {string} args.prompt - Description of the task to add
 * @param {Array<number>} [args.dependencies=[]] - Task dependencies as array of IDs
 * @param {string} [args.priority='medium'] - Task priority (high, medium, low)
 * @param {string} [args.file] - Path to the tasks file
 * @param {string} [args.projectRoot] - Project root directory
 * @param {Object} log - Logger object
 * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }
 */
export async function addTaskDirect(args, log) {
	try {
		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Find the tasks.json path
		const tasksPath = findTasksJsonPath(args, log);

		// Check required parameters
		if (!args.prompt) {
			log.error('Missing required parameter: prompt');
			return {
				success: false,
				error: {
					code: 'MISSING_PARAMETER',
					message: 'The prompt parameter is required for adding a task'
				}
			};
		}

		// Extract and prepare parameters
		const prompt = args.prompt;
		const dependencies = Array.isArray(args.dependencies)
			? args.dependencies
			: (args.dependencies ? String(args.dependencies).split(',').map(id => parseInt(id.trim(), 10)) : []);
		const priority = args.priority || 'medium';

		log.info(`Adding new task with prompt: "${prompt}", dependencies: [${dependencies.join(', ')}], priority: ${priority}`);

		// Call the addTask function with 'json' outputFormat to prevent console output when called via MCP
		const newTaskId = await addTask(
			tasksPath,
			prompt,
			dependencies,
			priority,
			{ mcpLog: log },
			'json'
		);

		// Restore normal logging
		disableSilentMode();

		return {
			success: true,
			data: {
				taskId: newTaskId,
				message: `Successfully added new task #${newTaskId}`
			}
		};
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error in addTaskDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'ADD_TASK_ERROR',
				message: error.message
			}
		};
	}
}
@@ -3,180 +3,99 @@
 */

import { analyzeTaskComplexity } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import fs from 'fs';
import path from 'path';

/**
 * Analyze task complexity and generate recommendations
 * @param {Object} args - Function arguments
 * @param {string} [args.file] - Path to the tasks file
 * @param {string} [args.output] - Output file path for the report
 * @param {string} [args.model] - LLM model to use for analysis
 * @param {string|number} [args.threshold] - Minimum complexity score to recommend expansion (1-10)
 * @param {boolean} [args.research] - Use Perplexity AI for research-backed complexity analysis
 * @param {string} [args.projectRoot] - Project root directory
 * @param {Object} log - Logger object
 * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
 */
export async function analyzeTaskComplexityDirect(args, log) {
	try {
		log.info(`Analyzing task complexity with args: ${JSON.stringify(args)}`);

		// Find the tasks.json path
		const tasksPath = findTasksJsonPath(args, log);

		// Determine output path
		let outputPath = args.output || 'scripts/task-complexity-report.json';
		if (!path.isAbsolute(outputPath) && args.projectRoot) {
			outputPath = path.join(args.projectRoot, outputPath);
		}

		// Create options object for analyzeTaskComplexity
		const options = {
			file: tasksPath,
			output: outputPath,
			model: args.model,
			threshold: args.threshold,
			research: args.research === true
		};

		log.info(`Analyzing task complexity from: ${tasksPath}`);
		log.info(`Output report will be saved to: ${outputPath}`);

		if (options.research) {
			log.info('Using Perplexity AI for research-backed complexity analysis');
		}

		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Call the core function
		await analyzeTaskComplexity(options);

		// Restore normal logging
		disableSilentMode();

		// Verify the report file was created
		if (!fs.existsSync(outputPath)) {
			return {
				success: false,
				error: {
					code: 'ANALYZE_ERROR',
					message: 'Analysis completed but no report file was created'
				}
			};
		}

		// Read the report file
		const report = JSON.parse(fs.readFileSync(outputPath, 'utf8'));

		return {
			success: true,
			data: {
				message: `Task complexity analysis complete. Report saved to ${outputPath}`,
				reportPath: outputPath,
				reportSummary: {
					taskCount: report.length,
					highComplexityTasks: report.filter(t => t.complexityScore >= 8).length,
					mediumComplexityTasks: report.filter(t => t.complexityScore >= 5 && t.complexityScore < 8).length,
					lowComplexityTasks: report.filter(t => t.complexityScore < 5).length
				}
			}
		};
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error in analyzeTaskComplexityDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message
			}
		};
	}
}
@@ -12,21 +12,21 @@ import { contextManager } from '../context-manager.js';
 * @returns {Object} - Cache statistics
 */
export async function getCacheStatsDirect(args, log) {
	try {
		log.info('Retrieving cache statistics');
		const stats = contextManager.getStats();
		return {
			success: true,
			data: stats
		};
	} catch (error) {
		log.error(`Error getting cache stats: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CACHE_STATS_ERROR',
				message: error.message || 'Unknown error occurred'
			}
		};
	}
}
@@ -3,126 +3,110 @@
 */

import { clearSubtasks } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import fs from 'fs';

/**
 * Clear subtasks from specified tasks
 * @param {Object} args - Function arguments
 * @param {string} [args.id] - Task IDs (comma-separated) to clear subtasks from
 * @param {boolean} [args.all] - Clear subtasks from all tasks
 * @param {string} [args.file] - Path to the tasks file
 * @param {string} [args.projectRoot] - Project root directory
 * @param {Object} log - Logger object
 * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
 */
export async function clearSubtasksDirect(args, log) {
	try {
		log.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);

		// Either id or all must be provided
		if (!args.id && !args.all) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Either task IDs with id parameter or all parameter must be provided'
				}
			};
		}

		// Find the tasks.json path
		const tasksPath = findTasksJsonPath(args, log);

		// Check if tasks.json exists
		if (!fs.existsSync(tasksPath)) {
			return {
				success: false,
				error: {
					code: 'FILE_NOT_FOUND_ERROR',
					message: `Tasks file not found at ${tasksPath}`
				}
			};
		}

		let taskIds;

		// If all is specified, get all task IDs
		if (args.all) {
			log.info('Clearing subtasks from all tasks');
			const data = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
			if (!data || !data.tasks || data.tasks.length === 0) {
				return {
					success: false,
					error: {
						code: 'INPUT_VALIDATION_ERROR',
						message: 'No valid tasks found in the tasks file'
					}
				};
			}
			taskIds = data.tasks.map(t => t.id).join(',');
		} else {
			// Use the provided task IDs
			taskIds = args.id;
		}

		log.info(`Clearing subtasks from tasks: ${taskIds}`);

		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Call the core function
		clearSubtasks(tasksPath, taskIds);

		// Restore normal logging
		disableSilentMode();

		// Read the updated data to provide a summary
		const updatedData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
		const taskIdArray = taskIds.split(',').map(id => parseInt(id.trim(), 10));

		// Build a summary of what was done
		const clearedTasksCount = taskIdArray.length;
		const taskSummary = taskIdArray.map(id => {
			const task = updatedData.tasks.find(t => t.id === id);
			return task ? { id, title: task.title } : { id, title: 'Task not found' };
		});

		return {
			success: true,
			data: {
				message: `Successfully cleared subtasks from ${clearedTasksCount} task(s)`,
				tasksCleared: taskSummary
			}
		};
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error in clearSubtasksDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message
			}
		};
	}
}
@@ -3,128 +3,119 @@
 * Direct function implementation for displaying complexity analysis report
 */

import { readComplexityReport, enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import path from 'path';

/**
 * Direct function wrapper for displaying the complexity report with error handling and caching.
 *
 * @param {Object} args - Command arguments containing file path option
 * @param {Object} log - Logger object
 * @returns {Promise<Object>} - Result object with success status and data/error information
 */
export async function complexityReportDirect(args, log) {
	try {
		log.info(`Getting complexity report with args: ${JSON.stringify(args)}`);

		// Get tasks file path to determine project root for the default report location
		let tasksPath;
		try {
			tasksPath = findTasksJsonPath(args, log);
		} catch (error) {
			log.warn(`Tasks file not found, using current directory: ${error.message}`);
			// Continue with default or specified report path
		}

		// Get report file path from args or use default
		const reportPath = args.file || path.join(process.cwd(), 'scripts', 'task-complexity-report.json');

		log.info(`Looking for complexity report at: ${reportPath}`);

		// Generate cache key based on report path
		const cacheKey = `complexityReport:${reportPath}`;

		// Define the core action function to read the report
		const coreActionFn = async () => {
			try {
				// Enable silent mode to prevent console logs from interfering with JSON response
				enableSilentMode();

				const report = readComplexityReport(reportPath);

				// Restore normal logging
				disableSilentMode();

				if (!report) {
					log.warn(`No complexity report found at ${reportPath}`);
					return {
						success: false,
						error: {
							code: 'FILE_NOT_FOUND_ERROR',
							message: `No complexity report found at ${reportPath}. Run 'analyze-complexity' first.`
						}
					};
				}

				return {
					success: true,
					data: {
						report,
						reportPath
					}
				};
			} catch (error) {
				// Make sure to restore normal logging even if there's an error
				disableSilentMode();

				log.error(`Error reading complexity report: ${error.message}`);
				return {
					success: false,
					error: {
						code: 'READ_ERROR',
						message: error.message
					}
				};
			}
		};

		// Use the caching utility
		try {
			const result = await getCachedOrExecute({
				cacheKey,
				actionFn: coreActionFn,
				log
			});
			log.info(`complexityReportDirect completed. From cache: ${result.fromCache}`);
			return result; // Returns { success, data/error, fromCache }
		} catch (error) {
			// Catch unexpected errors from getCachedOrExecute itself
			// Ensure silent mode is disabled
			disableSilentMode();

			log.error(`Unexpected error during getCachedOrExecute for complexityReport: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'UNEXPECTED_ERROR',
					message: error.message
				},
				fromCache: false
			};
		}
	} catch (error) {
		// Ensure silent mode is disabled if an outer error occurs
		disableSilentMode();

		log.error(`Error in complexityReportDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'UNEXPECTED_ERROR',
				message: error.message
			},
			fromCache: false
		};
	}
}
@@ -3,140 +3,84 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import { expandAllTasks } from '../../../../scripts/modules/task-manager.js';
|
import { expandAllTasks } from '../../../../scripts/modules/task-manager.js';
|
||||||
import {
|
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
|
||||||
enableSilentMode,
|
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||||
disableSilentMode,
|
|
||||||
isSilentMode
|
|
||||||
} from '../../../../scripts/modules/utils.js';
|
|
||||||
import { getAnthropicClientForMCP } from '../utils/ai-client-utils.js';
|
|
||||||
import path from 'path';
|
|
||||||
import fs from 'fs';
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Expand all pending tasks with subtasks
|
* Expand all pending tasks with subtasks
|
||||||
* @param {Object} args - Function arguments
|
* @param {Object} args - Function arguments
|
||||||
* @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
|
|
||||||
* @param {number|string} [args.num] - Number of subtasks to generate
|
* @param {number|string} [args.num] - Number of subtasks to generate
|
||||||
* @param {boolean} [args.research] - Enable Perplexity AI for research-backed subtask generation
|
* @param {boolean} [args.research] - Enable Perplexity AI for research-backed subtask generation
|
||||||
* @param {string} [args.prompt] - Additional context to guide subtask generation
|
* @param {string} [args.prompt] - Additional context to guide subtask generation
|
||||||
* @param {boolean} [args.force] - Force regeneration of subtasks for tasks that already have them
|
* @param {boolean} [args.force] - Force regeneration of subtasks for tasks that already have them
|
||||||
|
* @param {string} [args.file] - Path to the tasks file
|
||||||
|
* @param {string} [args.projectRoot] - Project root directory
|
||||||
* @param {Object} log - Logger object
|
* @param {Object} log - Logger object
|
||||||
* @param {Object} context - Context object containing session
|
|
||||||
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
|
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
|
||||||
 */
export async function expandAllTasksDirect(args, log) {
	try {
		log.info(`Expanding all tasks with args: ${JSON.stringify(args)}`);

		// Find the tasks.json path
		const tasksPath = findTasksJsonPath(args, log);

		// Parse parameters
		const numSubtasks = args.num ? parseInt(args.num, 10) : undefined;
		const useResearch = args.research === true;
		const additionalContext = args.prompt || '';
		const forceFlag = args.force === true;

		log.info(`Expanding all tasks with ${numSubtasks || 'default'} subtasks each...`);
		if (useResearch) {
			log.info('Using Perplexity AI for research-backed subtask generation');
		}
		if (additionalContext) {
			log.info(`Additional context: "${additionalContext}"`);
		}
		if (forceFlag) {
			log.info('Force regeneration of subtasks is enabled');
		}

		try {
			// Enable silent mode to prevent console logs from interfering with JSON response
			enableSilentMode();

			// Call the core function
			await expandAllTasks(numSubtasks, useResearch, additionalContext, forceFlag);

			// Restore normal logging
			disableSilentMode();

			// The expandAllTasks function doesn't have a return value, so we'll create our own success response
			return {
				success: true,
				data: {
					message: "Successfully expanded all pending tasks with subtasks",
					details: {
						numSubtasks: numSubtasks,
						research: useResearch,
						prompt: additionalContext,
						force: forceFlag
					}
				}
			};
		} catch (error) {
			// Make sure to restore normal logging even if there's an error
			disableSilentMode();
			throw error; // Rethrow to be caught by outer catch block
		}
	} catch (error) {
		// Ensure silent mode is disabled
		disableSilentMode();

		log.error(`Error in expandAllTasksDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message
			}
		};
	}
}
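The wrappers in this diff guard every core call with enableSilentMode()/disableSilentMode() so stray console output cannot corrupt the JSON payload returned over MCP. A minimal sketch of that pattern, assuming a simple module-level flag (the real helpers in scripts/modules/utils.js may be implemented differently):

// Illustrative only: a minimal silent-mode toggle in the spirit of the
// enableSilentMode/disableSilentMode helpers used above. The real
// implementation in scripts/modules/utils.js may differ.
let silent = false;
export function enableSilentMode() { silent = true; }
export function disableSilentMode() { silent = false; }
export function isSilentMode() { return silent; }

// Pairing the toggle with try/finally is what keeps a thrown error from
// leaving logging muted for later MCP calls.
export async function withSilentMode(fn) {
	enableSilentMode();
	try {
		return await fn();
	} finally {
		disableSilentMode();
	}
}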
@@ -4,17 +4,8 @@
 */

import { expandTask } from '../../../../scripts/modules/task-manager.js';
import { readJSON, writeJSON, enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import path from 'path';
import fs from 'fs';
@@ -22,249 +13,162 @@ import fs from 'fs';
/**
 * Direct function wrapper for expanding a task into subtasks with error handling.
 *
 * @param {Object} args - Command arguments
 * @param {Object} log - Logger object
 * @returns {Promise<Object>} - Task expansion result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
 */
export async function expandTaskDirect(args, log) {
	let tasksPath;
	try {
		// Find the tasks path first
		tasksPath = findTasksJsonPath(args, log);
	} catch (error) {
		log.error(`Tasks file not found: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'FILE_NOT_FOUND_ERROR',
				message: error.message
			},
			fromCache: false
		};
	}

	// Validate task ID
	const taskId = args.id ? parseInt(args.id, 10) : null;
	if (!taskId) {
		log.error('Task ID is required');
		return {
			success: false,
			error: {
				code: 'INPUT_VALIDATION_ERROR',
				message: 'Task ID is required'
			},
			fromCache: false
		};
	}

	// Process other parameters
	const numSubtasks = args.num ? parseInt(args.num, 10) : undefined;
	const useResearch = args.research === true;
	const additionalContext = args.prompt || '';
	const force = args.force === true;

	try {
		log.info(`Expanding task ${taskId} into ${numSubtasks || 'default'} subtasks. Research: ${useResearch}, Force: ${force}`);

		// Read tasks data
		const data = readJSON(tasksPath);
		if (!data || !data.tasks) {
			return {
				success: false,
				error: {
					code: 'INVALID_TASKS_FILE',
					message: `No valid tasks found in ${tasksPath}`
				},
				fromCache: false
			};
		}

		// Find the specific task
		const task = data.tasks.find(t => t.id === taskId);

		if (!task) {
			return {
				success: false,
				error: {
					code: 'TASK_NOT_FOUND',
					message: `Task with ID ${taskId} not found`
				},
				fromCache: false
			};
		}

		// Check if task is completed
		if (task.status === 'done' || task.status === 'completed') {
			return {
				success: false,
				error: {
					code: 'TASK_COMPLETED',
					message: `Task ${taskId} is already marked as ${task.status} and cannot be expanded`
				},
				fromCache: false
			};
		}

		// Check for existing subtasks
		const hasExistingSubtasks = task.subtasks && task.subtasks.length > 0;

		// Keep a copy of the task before modification
		const originalTask = JSON.parse(JSON.stringify(task));

		// Tracking subtasks count before expansion
		const subtasksCountBefore = task.subtasks ? task.subtasks.length : 0;

		// Create a backup of the tasks.json file
		const backupPath = path.join(path.dirname(tasksPath), 'tasks.json.bak');
		fs.copyFileSync(tasksPath, backupPath);

		// Directly modify the data instead of calling the CLI function
		if (!task.subtasks) {
			task.subtasks = [];
		}

		// Save tasks.json with potentially empty subtasks array
		writeJSON(tasksPath, data);

		// Process the request
		try {
			// Enable silent mode to prevent console logs from interfering with JSON response
			enableSilentMode();

			// Call expandTask
			const result = await expandTask(taskId, numSubtasks, useResearch, additionalContext);

			// Restore normal logging
			disableSilentMode();

			// Read the updated data
			const updatedData = readJSON(tasksPath);
			const updatedTask = updatedData.tasks.find(t => t.id === taskId);

			// Calculate how many subtasks were added
			const subtasksAdded = updatedTask.subtasks ?
				updatedTask.subtasks.length - subtasksCountBefore : 0;

			// Return the result
			log.info(`Successfully expanded task ${taskId} with ${subtasksAdded} new subtasks`);
			return {
				success: true,
				data: {
					task: updatedTask,
					subtasksAdded,
					hasExistingSubtasks
				},
				fromCache: false
			};
		} catch (error) {
			// Make sure to restore normal logging even if there's an error
			disableSilentMode();

			log.error(`Error expanding task: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'CORE_FUNCTION_ERROR',
					message: error.message || 'Failed to expand task'
				},
				fromCache: false
			};
		}
	} catch (error) {
		log.error(`Error expanding task: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message || 'Failed to expand task'
			},
			fromCache: false
		};
	}
}
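All of these direct functions resolve to the same envelope shape, { success, data?, error?, fromCache }. A caller-side sketch with a hypothetical handleResult helper (the helper and its name are illustrative, not part of the repo):

// Hypothetical caller-side helper; the name and shape are illustrative, not a repo export.
async function handleResult(resultPromise, log) {
	const result = await resultPromise;
	if (!result.success) {
		log.error(`${result.error.code}: ${result.error.message}`);
		return null;
	}
	log.info(`Direct function succeeded (fromCache: ${result.fromCache === true})`);
	return result.data;
}

// Illustrative usage (the exact args shape depends on how the wrapper resolves the tasks file):
// const data = await handleResult(expandTaskDirect({ id: '3', file: 'tasks/tasks.json' }, log), log);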
@@ -3,78 +3,63 @@
 */

import { fixDependenciesCommand } from '../../../../scripts/modules/dependency-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import fs from 'fs';

/**
 * Fix invalid dependencies in tasks.json automatically
 * @param {Object} args - Function arguments
 * @param {string} [args.file] - Path to the tasks file
 * @param {string} [args.projectRoot] - Project root directory
 * @param {Object} log - Logger object
 * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
 */
export async function fixDependenciesDirect(args, log) {
	try {
		log.info(`Fixing invalid dependencies in tasks...`);

		// Find the tasks.json path
		const tasksPath = findTasksJsonPath(args, log);

		// Verify the file exists
		if (!fs.existsSync(tasksPath)) {
			return {
				success: false,
				error: {
					code: 'FILE_NOT_FOUND',
					message: `Tasks file not found at ${tasksPath}`
				}
			};
		}

		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Call the original command function
		await fixDependenciesCommand(tasksPath);

		// Restore normal logging
		disableSilentMode();

		return {
			success: true,
			data: {
				message: 'Dependencies fixed successfully',
				tasksPath
			}
		};
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error fixing dependencies: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'FIX_DEPENDENCIES_ERROR',
				message: error.message
			}
		};
	}
}
@@ -4,97 +4,84 @@
 */

import { generateTaskFiles } from '../../../../scripts/modules/task-manager.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import path from 'path';

/**
 * Direct function wrapper for generateTaskFiles with error handling.
 *
 * @param {Object} args - Command arguments containing file and output path options.
 * @param {Object} log - Logger object.
 * @returns {Promise<Object>} - Result object with success status and data/error information.
 */
export async function generateTaskFilesDirect(args, log) {
	try {
		log.info(`Generating task files with args: ${JSON.stringify(args)}`);

		// Get tasks file path
		let tasksPath;
		try {
			tasksPath = findTasksJsonPath(args, log);
		} catch (error) {
			log.error(`Error finding tasks file: ${error.message}`);
			return {
				success: false,
				error: { code: 'TASKS_FILE_ERROR', message: error.message },
				fromCache: false
			};
		}

		// Get output directory (defaults to the same directory as the tasks file)
		let outputDir = args.output;
		if (!outputDir) {
			outputDir = path.dirname(tasksPath);
		}

		log.info(`Generating task files from ${tasksPath} to ${outputDir}`);

		// Execute core generateTaskFiles function in a separate try/catch
		try {
			// Enable silent mode to prevent logs from being written to stdout
			enableSilentMode();

			// The function is synchronous despite being awaited elsewhere
			generateTaskFiles(tasksPath, outputDir);

			// Restore normal logging after task generation
			disableSilentMode();
		} catch (genError) {
			// Make sure to restore normal logging even if there's an error
			disableSilentMode();

			log.error(`Error in generateTaskFiles: ${genError.message}`);
			return {
				success: false,
				error: { code: 'GENERATE_FILES_ERROR', message: genError.message },
				fromCache: false
			};
		}

		// Return success with file paths
		return {
			success: true,
			data: {
				message: `Successfully generated task files`,
				tasksPath,
				outputDir,
				taskFiles: 'Individual task files have been generated in the output directory'
			},
			fromCache: false // This operation always modifies state and should never be cached
		};
	} catch (error) {
		// Make sure to restore normal logging if an outer error occurs
		disableSilentMode();

		log.error(`Error generating task files: ${error.message}`);
		return {
			success: false,
			error: { code: 'GENERATE_TASKS_ERROR', message: error.message || 'Unknown error generating task files' },
			fromCache: false
		};
	}
}
@@ -1,134 +0,0 @@
import { initializeProject } from '../../../../scripts/init.js'; // Import core function and its logger if needed separately
import {
	enableSilentMode,
	disableSilentMode
	// isSilentMode // Not used directly here
} from '../../../../scripts/modules/utils.js';
import { getProjectRootFromSession } from '../../tools/utils.js'; // Adjust path if necessary
import os from 'os'; // Import os module for home directory check

/**
 * Direct function wrapper for initializing a project.
 * Derives target directory from session, sets CWD, and calls core init logic.
 * @param {object} args - Arguments containing initialization options (addAliases, skipInstall, yes, projectRoot)
 * @param {object} log - The FastMCP logger instance.
 * @param {object} context - The context object, must contain { session }.
 * @returns {Promise<{success: boolean, data?: any, error?: {code: string, message: string}}>} - Standard result object.
 */
export async function initializeProjectDirect(args, log, context = {}) {
	const { session } = context;
	const homeDir = os.homedir();
	let targetDirectory = null;

	log.info(
		`CONTEXT received in direct function: ${context ? JSON.stringify(Object.keys(context)) : 'MISSING or Falsy'}`
	);
	log.info(
		`SESSION extracted in direct function: ${session ? 'Exists' : 'MISSING or Falsy'}`
	);
	log.info(`Args received in direct function: ${JSON.stringify(args)}`);

	// --- Determine Target Directory ---
	// 1. Prioritize projectRoot passed directly in args
	// Ensure it's not null, '/', or the home directory
	if (
		args.projectRoot &&
		args.projectRoot !== '/' &&
		args.projectRoot !== homeDir
	) {
		log.info(`Using projectRoot directly from args: ${args.projectRoot}`);
		targetDirectory = args.projectRoot;
	} else {
		// 2. If args.projectRoot is missing or invalid, THEN try session (as a fallback)
		log.warn(
			`args.projectRoot ('${args.projectRoot}') is missing or invalid. Attempting to derive from session.`
		);
		const sessionDerivedPath = getProjectRootFromSession(session, log);
		// Validate the session-derived path as well
		if (
			sessionDerivedPath &&
			sessionDerivedPath !== '/' &&
			sessionDerivedPath !== homeDir
		) {
			log.info(
				`Using project root derived from session: ${sessionDerivedPath}`
			);
			targetDirectory = sessionDerivedPath;
		} else {
			log.error(
				`Could not determine a valid project root. args.projectRoot='${args.projectRoot}', sessionDerivedPath='${sessionDerivedPath}'`
			);
		}
	}

	// 3. Validate the final targetDirectory
	if (!targetDirectory) {
		// This error now covers cases where neither args.projectRoot nor session provided a valid path
		return {
			success: false,
			error: {
				code: 'INVALID_TARGET_DIRECTORY',
				message: `Cannot initialize project: Could not determine a valid target directory. Please ensure a workspace/folder is open or specify projectRoot.`,
				details: `Attempted args.projectRoot: ${args.projectRoot}`
			},
			fromCache: false
		};
	}

	// --- Proceed with validated targetDirectory ---
	log.info(`Validated target directory for initialization: ${targetDirectory}`);

	const originalCwd = process.cwd();
	let resultData;
	let success = false;
	let errorResult = null;

	log.info(
		`Temporarily changing CWD to ${targetDirectory} for initialization.`
	);
	process.chdir(targetDirectory); // Change CWD to the *validated* targetDirectory

	enableSilentMode(); // Enable silent mode BEFORE calling the core function
	try {
		// Always force yes: true when called via MCP to avoid interactive prompts
		const options = {
			aliases: args.addAliases,
			skipInstall: args.skipInstall,
			yes: true // Force yes mode
		};

		log.info(`Initializing project with options: ${JSON.stringify(options)}`);
		const result = await initializeProject(options); // Call core logic

		// Format success result for handleApiResult
		resultData = {
			message: 'Project initialized successfully.',
			next_step:
				'Now that the project is initialized, the next step is to create the tasks by parsing a PRD. This will create the tasks folder and the initial task files (tasks folder will be created when parse-prd is run). The parse-prd tool will require a prd.txt file as input (typically found in the project root directory, scripts/ directory). You can create a prd.txt file by asking the user about their idea, and then using the scripts/example_prd.txt file as a template to generate a prd.txt file in scripts/. You may skip all of this if the user already has a prd.txt file. You can THEN use the parse-prd tool to create the tasks. So: step 1 after initialization is to create a prd.txt file in scripts/prd.txt or confirm the user already has one. Step 2 is to use the parse-prd tool to create the tasks. Do not bother looking for tasks after initialization, just use the parse-prd tool to create the tasks after creating a prd.txt from which to parse the tasks. You do NOT need to reinitialize the project to parse-prd.',
			...result // Include details returned by initializeProject
		};
		success = true;
		log.info(
			`Project initialization completed successfully in ${targetDirectory}.`
		);
	} catch (error) {
		log.error(`Core initializeProject failed: ${error.message}`);
		errorResult = {
			code: 'INITIALIZATION_FAILED',
			message: `Core project initialization failed: ${error.message}`,
			details: error.stack
		};
		success = false;
	} finally {
		disableSilentMode(); // ALWAYS disable silent mode in finally
		log.info(`Restoring original CWD: ${originalCwd}`);
		process.chdir(originalCwd); // Change back to original CWD
	}

	// Return in format expected by handleApiResult
	if (success) {
		return { success: true, data: resultData, fromCache: false };
	} else {
		return { success: false, error: errorResult, fromCache: false };
	}
}
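The removed initializeProjectDirect temporarily changes process.cwd() to the validated target directory and restores it in a finally block. A generic sketch of that save/chdir/restore pattern, assuming any async job that depends on the working directory:

// Illustrative helper, not a repo export: run fn with the working directory
// temporarily switched to dir, restoring it no matter what fn does.
async function runInDirectory(dir, fn) {
	const originalCwd = process.cwd();
	process.chdir(dir);
	try {
		return await fn();
	} finally {
		// Restore the original CWD even if fn throws, mirroring the finally block above.
		process.chdir(originalCwd);
	}
}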
@@ -5,108 +5,79 @@
import { listTasks } from '../../../../scripts/modules/task-manager.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for listTasks with error handling and caching.
 *
 * @param {Object} args - Command arguments (projectRoot is expected to be resolved).
 * @param {Object} log - Logger object.
 * @returns {Promise<Object>} - Task list result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }.
 */
export async function listTasksDirect(args, log) {
	let tasksPath;
	try {
		// Find the tasks path first - needed for cache key and execution
		tasksPath = findTasksJsonPath(args, log);
	} catch (error) {
		if (error.code === 'TASKS_FILE_NOT_FOUND') {
			log.error(`Tasks file not found: ${error.message}`);
			// Return the error structure expected by the calling tool/handler
			return { success: false, error: { code: error.code, message: error.message }, fromCache: false };
		}
		log.error(`Unexpected error finding tasks file: ${error.message}`);
		// Re-throw for outer catch or return structured error
		return { success: false, error: { code: 'FIND_TASKS_PATH_ERROR', message: error.message }, fromCache: false };
	}

	// Generate cache key *after* finding tasksPath
	const statusFilter = args.status || 'all';
	const withSubtasks = args.withSubtasks || false;
	const cacheKey = `listTasks:${tasksPath}:${statusFilter}:${withSubtasks}`;

	// Define the action function to be executed on cache miss
	const coreListTasksAction = async () => {
		try {
			// Enable silent mode to prevent console logs from interfering with JSON response
			enableSilentMode();

			log.info(`Executing core listTasks function for path: ${tasksPath}, filter: ${statusFilter}, subtasks: ${withSubtasks}`);
			const resultData = listTasks(tasksPath, statusFilter, withSubtasks, 'json');

			if (!resultData || !resultData.tasks) {
				log.error('Invalid or empty response from listTasks core function');
				return { success: false, error: { code: 'INVALID_CORE_RESPONSE', message: 'Invalid or empty response from listTasks core function' } };
			}
			log.info(`Core listTasks function retrieved ${resultData.tasks.length} tasks`);

			// Restore normal logging
			disableSilentMode();

			return { success: true, data: resultData };
		} catch (error) {
			// Make sure to restore normal logging even if there's an error
			disableSilentMode();

			log.error(`Core listTasks function failed: ${error.message}`);
			return { success: false, error: { code: 'LIST_TASKS_CORE_ERROR', message: error.message || 'Failed to list tasks' } };
		}
	};

	// Use the caching utility
	try {
		const result = await getCachedOrExecute({
			cacheKey,
			actionFn: coreListTasksAction,
			log
		});
		log.info(`listTasksDirect completed. From cache: ${result.fromCache}`);
		return result; // Returns { success, data/error, fromCache }
	} catch (error) {
		// Catch unexpected errors from getCachedOrExecute itself (though unlikely)
		log.error(`Unexpected error during getCachedOrExecute for listTasks: ${error.message}`);
		console.error(error.stack);
		return { success: false, error: { code: 'CACHE_UTIL_ERROR', message: error.message }, fromCache: false };
	}
}
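listTasksDirect builds a cache key from the resolved tasks path plus its filters and hands the work to getCachedOrExecute. A sketch of what such a cache-or-execute helper could look like; the actual implementation in the MCP server's tools/utils.js may handle TTLs, invalidation and error caching differently:

// Sketch only: an in-memory cache-or-execute helper with the same call shape as above.
const cache = new Map();

async function getCachedOrExecute({ cacheKey, actionFn, log }) {
	if (cache.has(cacheKey)) {
		log.info(`Cache hit for ${cacheKey}`);
		return { ...cache.get(cacheKey), fromCache: true };
	}
	const result = await actionFn();
	if (result.success) {
		// Only successful results are worth reusing on later calls.
		cache.set(cacheKey, result);
	}
	return { ...result, fromCache: false };
}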
@@ -6,127 +6,117 @@
import { findNextTask } from '../../../../scripts/modules/task-manager.js';
import { readJSON } from '../../../../scripts/modules/utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for finding the next task to work on with error handling and caching.
 *
 * @param {Object} args - Command arguments
 * @param {Object} log - Logger object
 * @returns {Promise<Object>} - Next task result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
 */
export async function nextTaskDirect(args, log) {
	let tasksPath;
	try {
		// Find the tasks path first - needed for cache key and execution
		tasksPath = findTasksJsonPath(args, log);
	} catch (error) {
		log.error(`Tasks file not found: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'FILE_NOT_FOUND_ERROR',
				message: error.message
			},
			fromCache: false
		};
	}

	// Generate cache key using task path
	const cacheKey = `nextTask:${tasksPath}`;

	// Define the action function to be executed on cache miss
	const coreNextTaskAction = async () => {
		try {
			// Enable silent mode to prevent console logs from interfering with JSON response
			enableSilentMode();

			log.info(`Finding next task from ${tasksPath}`);

			// Read tasks data
			const data = readJSON(tasksPath);
			if (!data || !data.tasks) {
				disableSilentMode(); // Disable before return
				return {
					success: false,
					error: {
						code: 'INVALID_TASKS_FILE',
						message: `No valid tasks found in ${tasksPath}`
					}
				};
			}

			// Find the next task
			const nextTask = findNextTask(data.tasks);

			if (!nextTask) {
				log.info('No eligible next task found. All tasks are either completed or have unsatisfied dependencies');
				return {
					success: true,
					data: {
						message: 'No eligible next task found. All tasks are either completed or have unsatisfied dependencies',
						nextTask: null,
						allTasks: data.tasks
					}
				};
			}

			// Restore normal logging
			disableSilentMode();

			// Return the next task data with the full tasks array for reference
			log.info(`Successfully found next task ${nextTask.id}: ${nextTask.title}`);
			return {
				success: true,
				data: {
					nextTask,
					allTasks: data.tasks
				}
			};
		} catch (error) {
			// Make sure to restore normal logging even if there's an error
			disableSilentMode();

			log.error(`Error finding next task: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'CORE_FUNCTION_ERROR',
					message: error.message || 'Failed to find next task'
				}
			};
		}
	};

	// Use the caching utility
	try {
		const result = await getCachedOrExecute({
			cacheKey,
			actionFn: coreNextTaskAction,
			log
		});
		log.info(`nextTaskDirect completed. From cache: ${result.fromCache}`);
		return result; // Returns { success, data/error, fromCache }
	} catch (error) {
		// Catch unexpected errors from getCachedOrExecute itself
		log.error(`Unexpected error during getCachedOrExecute for nextTask: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'UNEXPECTED_ERROR',
				message: error.message
			},
			fromCache: false
		};
	}
}
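findNextTask itself lives in task-manager.js and is not shown in this diff; the wrapper only implies its contract: a task is eligible when it is still pending and all of its dependencies are done. A hedged sketch of that selection rule (the real function may also weigh priority and ordering):

// Hypothetical sketch of the eligibility rule implied by the wrapper above;
// not the repo's actual findNextTask implementation.
function findNextTaskSketch(tasks) {
	const doneIds = new Set(
		tasks
			.filter((t) => t.status === 'done' || t.status === 'completed')
			.map((t) => t.id)
	);
	return (
		tasks.find(
			(t) =>
				t.status === 'pending' &&
				(t.dependencies || []).every((depId) => doneIds.has(depId))
		) || null
	);
}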
@@ -6,210 +6,109 @@
import path from 'path';
import fs from 'fs';
import { parsePRD } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for parsing PRD documents and generating tasks.
 *
 * @param {Object} args - Command arguments containing input, numTasks or tasks, and output options.
 * @param {Object} log - Logger object.
 * @returns {Promise<Object>} - Result object with success status and data/error information.
 */
export async function parsePRDDirect(args, log) {
	try {
		log.info(`Parsing PRD document with args: ${JSON.stringify(args)}`);

		// Check required parameters
		if (!args.input) {
			const errorMessage = 'No input file specified. Please provide an input PRD document path.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_INPUT_FILE', message: errorMessage },
				fromCache: false
			};
		}

		// Resolve input path (relative to project root if provided)
		const projectRoot = args.projectRoot || process.cwd();
		const inputPath = path.isAbsolute(args.input) ? args.input : path.resolve(projectRoot, args.input);

		// Determine output path
		let outputPath;
		if (args.output) {
			outputPath = path.isAbsolute(args.output) ? args.output : path.resolve(projectRoot, args.output);
		} else {
			// Default to tasks/tasks.json in the project root
			outputPath = path.resolve(projectRoot, 'tasks', 'tasks.json');
		}

		// Verify input file exists
		if (!fs.existsSync(inputPath)) {
			const errorMessage = `Input file not found: ${inputPath}`;
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'INPUT_FILE_NOT_FOUND', message: errorMessage },
				fromCache: false
			};
		}

		// Parse number of tasks - handle both string and number values
		let numTasks = 10; // Default
		if (args.numTasks) {
			numTasks = typeof args.numTasks === 'string' ? parseInt(args.numTasks, 10) : args.numTasks;
			if (isNaN(numTasks)) {
				numTasks = 10; // Fallback to default if parsing fails
				log.warn(`Invalid numTasks value: ${args.numTasks}. Using default: 10`);
			}
		}

		log.info(`Preparing to parse PRD from ${inputPath} and output to ${outputPath} with ${numTasks} tasks`);

		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Execute core parsePRD function (which is not async but we'll await it to maintain consistency)
		await parsePRD(inputPath, outputPath, numTasks);

		// Restore normal logging
		disableSilentMode();

		// Since parsePRD doesn't return a value but writes to a file, we'll read the result
		// to return it to the caller
		if (fs.existsSync(outputPath)) {
			const tasksData = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
			log.info(`Successfully parsed PRD and generated ${tasksData.tasks?.length || 0} tasks`);

			return {
				success: true,
				data: {
					message: `Successfully generated ${tasksData.tasks?.length || 0} tasks from PRD`,
					taskCount: tasksData.tasks?.length || 0,
					outputPath
				},
				fromCache: false // This operation always modifies state and should never be cached
			};
		} else {
			const errorMessage = `Tasks file was not created at ${outputPath}`;
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'OUTPUT_FILE_NOT_CREATED', message: errorMessage },
				fromCache: false
			};
		}
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error parsing PRD: ${error.message}`);
		return {
			success: false,
			error: { code: 'PARSE_PRD_ERROR', message: error.message || 'Unknown error parsing PRD' },
			fromCache: false
		};
	}
}
|
|
||||||
log.error(errorMessage);
|
|
||||||
return {
|
|
||||||
success: false,
|
|
||||||
error: { code: 'OUTPUT_FILE_NOT_CREATED', message: errorMessage },
|
|
||||||
fromCache: false
|
|
||||||
};
|
|
||||||
}
|
|
||||||
} finally {
|
|
||||||
// Always restore normal logging
|
|
||||||
disableSilentMode();
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
// Make sure to restore normal logging even if there's an error
|
|
||||||
disableSilentMode();
|
|
||||||
|
|
||||||
log.error(`Error parsing PRD: ${error.message}`);
|
|
||||||
return {
|
|
||||||
success: false,
|
|
||||||
error: {
|
|
||||||
code: 'PARSE_PRD_ERROR',
|
|
||||||
message: error.message || 'Unknown error parsing PRD'
|
|
||||||
},
|
|
||||||
fromCache: false
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -3,102 +3,81 @@
 */

import { removeDependency } from '../../../../scripts/modules/dependency-manager.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';

/**
 * Remove a dependency from a task
 * @param {Object} args - Function arguments
 * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
 * @param {string|number} args.id - Task ID to remove dependency from
 * @param {string|number} args.dependsOn - Task ID to remove as a dependency
 * @param {Object} log - Logger object
 * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
 */
export async function removeDependencyDirect(args, log) {
	// Destructure expected args
	const { tasksJsonPath, id, dependsOn } = args;
	try {
		log.info(`Removing dependency with args: ${JSON.stringify(args)}`);

		// Check if tasksJsonPath was provided
		if (!tasksJsonPath) {
			log.error('removeDependencyDirect called without tasksJsonPath');
			return {
				success: false,
				error: {
					code: 'MISSING_ARGUMENT',
					message: 'tasksJsonPath is required'
				}
			};
		}

		// Validate required parameters
		if (!id) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Task ID (id) is required'
				}
			};
		}

		if (!dependsOn) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Dependency ID (dependsOn) is required'
				}
			};
		}

		// Use provided path
		const tasksPath = tasksJsonPath;

		// Format IDs for the core function
		const taskId =
			id && id.includes && id.includes('.') ? id : parseInt(id, 10);
		const dependencyId =
			dependsOn && dependsOn.includes && dependsOn.includes('.')
				? dependsOn
				: parseInt(dependsOn, 10);

		log.info(
			`Removing dependency: task ${taskId} no longer depends on ${dependencyId}`
		);

		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Call the core function using the provided tasksPath
		await removeDependency(tasksPath, taskId, dependencyId);

		// Restore normal logging
		disableSilentMode();

		return {
			success: true,
			data: {
				message: `Successfully removed dependency: Task ${taskId} no longer depends on ${dependencyId}`,
				taskId: taskId,
				dependencyId: dependencyId
			}
		};
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error in removeDependencyDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message
			}
		};
	}
}
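A minimal call-site sketch (assumed, not part of this diff) for the wrapper above; the `tasksJsonPath` value is a placeholder and the caller is expected to have resolved tasks.json already.

```js
// Hypothetical example: drop task 5's dependency on task 2.
const res = await removeDependencyDirect(
	{ tasksJsonPath: '/abs/path/to/tasks.json', id: '5', dependsOn: '2' },
	log
);
// On success, res.data carries message, taskId and dependencyId.
```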
@@ -3,120 +3,93 @@
 */

import { removeSubtask } from '../../../../scripts/modules/task-manager.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';

/**
 * Remove a subtask from its parent task
 * @param {Object} args - Function arguments
 * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
 * @param {string} args.id - Subtask ID in format "parentId.subtaskId" (required)
 * @param {boolean} [args.convert] - Whether to convert the subtask to a standalone task
 * @param {boolean} [args.skipGenerate] - Skip regenerating task files
 * @param {Object} log - Logger object
 * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
 */
export async function removeSubtaskDirect(args, log) {
	// Destructure expected args
	const { tasksJsonPath, id, convert, skipGenerate } = args;
	try {
		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		log.info(`Removing subtask with args: ${JSON.stringify(args)}`);

		// Check if tasksJsonPath was provided
		if (!tasksJsonPath) {
			log.error('removeSubtaskDirect called without tasksJsonPath');
			disableSilentMode(); // Disable before returning
			return {
				success: false,
				error: {
					code: 'MISSING_ARGUMENT',
					message: 'tasksJsonPath is required'
				}
			};
		}

		if (!id) {
			disableSilentMode(); // Disable before returning
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message:
						'Subtask ID is required and must be in format "parentId.subtaskId"'
				}
			};
		}

		// Validate subtask ID format
		if (!id.includes('.')) {
			disableSilentMode(); // Disable before returning
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: `Invalid subtask ID format: ${id}. Expected format: "parentId.subtaskId"`
				}
			};
		}

		// Use provided path
		const tasksPath = tasksJsonPath;

		// Convert convertToTask to a boolean
		const convertToTask = convert === true;

		// Determine if we should generate files
		const generateFiles = !skipGenerate;

		log.info(
			`Removing subtask ${id} (convertToTask: ${convertToTask}, generateFiles: ${generateFiles})`
		);

		// Use the provided tasksPath
		const result = await removeSubtask(
			tasksPath,
			id,
			convertToTask,
			generateFiles
		);

		// Restore normal logging
		disableSilentMode();

		if (convertToTask && result) {
			// Return info about the converted task
			return {
				success: true,
				data: {
					message: `Subtask ${id} successfully converted to task #${result.id}`,
					task: result
				}
			};
		} else {
			// Return simple success message for deletion
			return {
				success: true,
				data: {
					message: `Subtask ${id} successfully removed`
				}
			};
		}
	} catch (error) {
		// Ensure silent mode is disabled even if an outer error occurs
		disableSilentMode();

		log.error(`Error in removeSubtaskDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message
			}
		};
	}
}
@@ -3,166 +3,102 @@
 * Direct function implementation for removing a task
 */

import {
	removeTask,
	taskExists
} from '../../../../scripts/modules/task-manager.js';
import {
	enableSilentMode,
	disableSilentMode,
	readJSON
} from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for removeTask with error handling.
 * Supports removing multiple tasks at once with comma-separated IDs.
 *
 * @param {Object} args - Command arguments
 * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
 * @param {string} args.id - The ID(s) of the task(s) or subtask(s) to remove (comma-separated for multiple).
 * @param {Object} log - Logger object
 * @returns {Promise<Object>} - Remove task result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: false }
 */
export async function removeTaskDirect(args, log) {
	// Destructure expected args
	const { tasksJsonPath, id } = args;
	try {
		// Check if tasksJsonPath was provided
		if (!tasksJsonPath) {
			log.error('removeTaskDirect called without tasksJsonPath');
			return {
				success: false,
				error: {
					code: 'MISSING_ARGUMENT',
					message: 'tasksJsonPath is required'
				},
				fromCache: false
			};
		}

		// Validate task ID parameter
		if (!id) {
			log.error('Task ID is required');
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Task ID is required'
				},
				fromCache: false
			};
		}

		// Split task IDs if comma-separated
		const taskIdArray = id.split(',').map((taskId) => taskId.trim());

		log.info(
			`Removing ${taskIdArray.length} task(s) with ID(s): ${taskIdArray.join(', ')} from ${tasksJsonPath}`
		);

		// Validate all task IDs exist before proceeding
		const data = readJSON(tasksJsonPath);
		if (!data || !data.tasks) {
			return {
				success: false,
				error: {
					code: 'INVALID_TASKS_FILE',
					message: `No valid tasks found in ${tasksJsonPath}`
				},
				fromCache: false
			};
		}

		const invalidTasks = taskIdArray.filter(
			(taskId) => !taskExists(data.tasks, taskId)
		);

		if (invalidTasks.length > 0) {
			return {
				success: false,
				error: {
					code: 'INVALID_TASK_ID',
					message: `The following tasks were not found: ${invalidTasks.join(', ')}`
				},
				fromCache: false
			};
		}

		// Remove tasks one by one
		const results = [];

		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		try {
			for (const taskId of taskIdArray) {
				try {
					const result = await removeTask(tasksJsonPath, taskId);
					results.push({
						taskId,
						success: true,
						message: result.message,
						removedTask: result.removedTask
					});
					log.info(`Successfully removed task: ${taskId}`);
				} catch (error) {
					results.push({
						taskId,
						success: false,
						error: error.message
					});
					log.error(`Error removing task ${taskId}: ${error.message}`);
				}
			}
		} finally {
			// Restore normal logging
			disableSilentMode();
		}

		// Check if all tasks were successfully removed
		const successfulRemovals = results.filter((r) => r.success);
		const failedRemovals = results.filter((r) => !r.success);

		if (successfulRemovals.length === 0) {
			// All removals failed
			return {
				success: false,
				error: {
					code: 'REMOVE_TASK_ERROR',
					message: 'Failed to remove any tasks',
					details: failedRemovals
						.map((r) => `${r.taskId}: ${r.error}`)
						.join('; ')
				},
				fromCache: false
			};
		}

		// At least some tasks were removed successfully
		return {
			success: true,
			data: {
				totalTasks: taskIdArray.length,
				successful: successfulRemovals.length,
				failed: failedRemovals.length,
				results: results,
				tasksPath: tasksJsonPath
			},
			fromCache: false
		};
	} catch (error) {
		// Ensure silent mode is disabled even if an outer error occurs
		disableSilentMode();

		// Catch any unexpected errors
		log.error(`Unexpected error in removeTaskDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'UNEXPECTED_ERROR',
				message: error.message
			},
			fromCache: false
		};
	}
}
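A brief usage sketch (assumed call site, not part of this diff): because `id` may be comma-separated, one call can remove several tasks and report per-task results.

```js
// Hypothetical example: remove tasks 12 and 15 in a single call.
const result = await removeTaskDirect(
	{ tasksJsonPath: '/abs/path/to/tasks.json', id: '12,15' },
	log
);
// result.data: { totalTasks, successful, failed, results, tasksPath }
```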
@@ -4,116 +4,106 @@
 */

import { setTaskStatus } from '../../../../scripts/modules/task-manager.js';
import {
	enableSilentMode,
	disableSilentMode,
	isSilentMode
} from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for setTaskStatus with error handling.
 *
 * @param {Object} args - Command arguments containing id, status and tasksJsonPath.
 * @param {Object} log - Logger object.
 * @returns {Promise<Object>} - Result object with success status and data/error information.
 */
export async function setTaskStatusDirect(args, log) {
	// Destructure expected args, including the resolved tasksJsonPath
	const { tasksJsonPath, id, status } = args;
	try {
		log.info(`Setting task status with args: ${JSON.stringify(args)}`);

		// Check if tasksJsonPath was provided
		if (!tasksJsonPath) {
			const errorMessage = 'tasksJsonPath is required but was not provided.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_ARGUMENT', message: errorMessage },
				fromCache: false
			};
		}

		// Check required parameters (id and status)
		if (!id) {
			const errorMessage =
				'No task ID specified. Please provide a task ID to update.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_TASK_ID', message: errorMessage },
				fromCache: false
			};
		}

		if (!status) {
			const errorMessage =
				'No status specified. Please provide a new status value.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_STATUS', message: errorMessage },
				fromCache: false
			};
		}

		// Use the provided path
		const tasksPath = tasksJsonPath;

		// Execute core setTaskStatus function
		const taskId = id;
		const newStatus = status;

		log.info(`Setting task ${taskId} status to "${newStatus}"`);

		// Call the core function with proper silent mode handling
		enableSilentMode(); // Enable silent mode before calling core function
		try {
			// Call the core function
			await setTaskStatus(tasksPath, taskId, newStatus, { mcpLog: log });

			log.info(`Successfully set task ${taskId} status to ${newStatus}`);

			// Return success data
			const result = {
				success: true,
				data: {
					message: `Successfully updated task ${taskId} status to "${newStatus}"`,
					taskId,
					status: newStatus,
					tasksPath: tasksPath // Return the path used
				},
				fromCache: false // This operation always modifies state and should never be cached
			};
			return result;
		} catch (error) {
			log.error(`Error setting task status: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'SET_STATUS_ERROR',
					message: error.message || 'Unknown error setting task status'
				},
				fromCache: false
			};
		} finally {
			// ALWAYS restore normal logging in finally block
			disableSilentMode();
		}
	} catch (error) {
		// Ensure silent mode is disabled if there was an uncaught error in the outer try block
		if (isSilentMode()) {
			disableSilentMode();
		}

		log.error(`Error setting task status: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'SET_STATUS_ERROR',
				message: error.message || 'Unknown error setting task status'
			},
			fromCache: false
		};
	}
}
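A brief usage sketch (assumed call site and status value, not taken from this diff) for the wrapper above:

```js
// Hypothetical example: mark task 3 as done.
const res = await setTaskStatusDirect(
	{ tasksJsonPath: '/abs/path/to/tasks.json', id: '3', status: 'done' },
	log
);
// On success, res.data reports message, taskId, status and tasksPath.
```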
@@ -6,140 +6,131 @@
import { findTaskById } from '../../../../scripts/modules/utils.js';
import { readJSON } from '../../../../scripts/modules/utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for showing task details with error handling and caching.
 *
 * @param {Object} args - Command arguments
 * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
 * @param {string} args.id - The ID of the task or subtask to show.
 * @param {Object} log - Logger object
 * @returns {Promise<Object>} - Task details result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
 */
export async function showTaskDirect(args, log) {
	// Destructure expected args
	const { tasksJsonPath, id } = args;

	if (!tasksJsonPath) {
		log.error('showTaskDirect called without tasksJsonPath');
		return {
			success: false,
			error: {
				code: 'MISSING_ARGUMENT',
				message: 'tasksJsonPath is required'
			},
			fromCache: false
		};
	}

	// Validate task ID
	const taskId = id;
	if (!taskId) {
		log.error('Task ID is required');
		return {
			success: false,
			error: {
				code: 'INPUT_VALIDATION_ERROR',
				message: 'Task ID is required'
			},
			fromCache: false
		};
	}

	// Generate cache key using the provided task path and ID
	const cacheKey = `showTask:${tasksJsonPath}:${taskId}`;

	// Define the action function to be executed on cache miss
	const coreShowTaskAction = async () => {
		try {
			// Enable silent mode to prevent console logs from interfering with JSON response
			enableSilentMode();

			log.info(
				`Retrieving task details for ID: ${taskId} from ${tasksJsonPath}`
			);

			// Read tasks data using the provided path
			const data = readJSON(tasksJsonPath);
			if (!data || !data.tasks) {
				disableSilentMode(); // Disable before returning
				return {
					success: false,
					error: {
						code: 'INVALID_TASKS_FILE',
						message: `No valid tasks found in ${tasksJsonPath}`
					}
				};
			}

			// Find the specific task
			const task = findTaskById(data.tasks, taskId);

			if (!task) {
				disableSilentMode(); // Disable before returning
				return {
					success: false,
					error: {
						code: 'TASK_NOT_FOUND',
						message: `Task with ID ${taskId} not found`
					}
				};
			}

			// Restore normal logging
			disableSilentMode();

			// Return the task data with the full tasks array for reference
			// (needed for formatDependenciesWithStatus function in UI)
			log.info(`Successfully found task ${taskId}`);
			return {
				success: true,
				data: {
					task,
					allTasks: data.tasks
				}
			};
		} catch (error) {
			// Make sure to restore normal logging even if there's an error
			disableSilentMode();

			log.error(`Error showing task: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'CORE_FUNCTION_ERROR',
					message: error.message || 'Failed to show task details'
				}
			};
		}
	};

	// Use the caching utility
	try {
		const result = await getCachedOrExecute({
			cacheKey,
			actionFn: coreShowTaskAction,
			log
		});
		log.info(`showTaskDirect completed. From cache: ${result.fromCache}`);
		return result; // Returns { success, data/error, fromCache }
	} catch (error) {
		// Catch unexpected errors from getCachedOrExecute itself
		disableSilentMode();
		log.error(
			`Unexpected error during getCachedOrExecute for showTask: ${error.message}`
		);
		return {
			success: false,
			error: {
				code: 'UNEXPECTED_ERROR',
				message: error.message
			},
			fromCache: false
		};
	}
}
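A brief usage sketch (assumed call site) illustrating the caching path above: repeated calls with the same tasks path and ID share a cache key, so a second call can come back with `fromCache: true`.

```js
// Hypothetical example: the second lookup may be served from cache.
const first = await showTaskDirect({ tasksJsonPath, id: '7' }, log);
const second = await showTaskDirect({ tasksJsonPath, id: '7' }, log);
log.info(`fromCache: ${first.fromCache} then ${second.fromCache}`);
```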
@@ -4,191 +4,120 @@
 */

import { updateSubtaskById } from '../../../../scripts/modules/task-manager.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';
import {
	getAnthropicClientForMCP,
	getPerplexityClientForMCP
} from '../utils/ai-client-utils.js';

/**
 * Direct function wrapper for updateSubtaskById with error handling.
 *
 * @param {Object} args - Command arguments containing id, prompt, useResearch and tasksJsonPath.
 * @param {Object} log - Logger object.
 * @param {Object} context - Context object containing session data.
 * @returns {Promise<Object>} - Result object with success status and data/error information.
 */
export async function updateSubtaskByIdDirect(args, log, context = {}) {
	const { session } = context; // Only extract session, not reportProgress
	const { tasksJsonPath, id, prompt, research } = args;

	try {
		log.info(`Updating subtask with args: ${JSON.stringify(args)}`);

		// Check if tasksJsonPath was provided
		if (!tasksJsonPath) {
			const errorMessage = 'tasksJsonPath is required but was not provided.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_ARGUMENT', message: errorMessage },
				fromCache: false
			};
		}

		// Check required parameters (id and prompt)
		if (!id) {
			const errorMessage =
				'No subtask ID specified. Please provide a subtask ID to update.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_SUBTASK_ID', message: errorMessage },
				fromCache: false
			};
		}

		if (!prompt) {
			const errorMessage =
				'No prompt specified. Please provide a prompt with information to add to the subtask.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_PROMPT', message: errorMessage },
				fromCache: false
			};
		}

		// Validate subtask ID format
		const subtaskId = id;
		if (typeof subtaskId !== 'string' && typeof subtaskId !== 'number') {
			const errorMessage = `Invalid subtask ID type: ${typeof subtaskId}. Subtask ID must be a string or number.`;
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'INVALID_SUBTASK_ID_TYPE', message: errorMessage },
				fromCache: false
			};
		}

		const subtaskIdStr = String(subtaskId);
		if (!subtaskIdStr.includes('.')) {
			const errorMessage = `Invalid subtask ID format: ${subtaskIdStr}. Subtask ID must be in format "parentId.subtaskId" (e.g., "5.2").`;
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'INVALID_SUBTASK_ID_FORMAT', message: errorMessage },
				fromCache: false
			};
		}

		// Use the provided path
		const tasksPath = tasksJsonPath;

		// Get research flag
		const useResearch = research === true;

		log.info(
			`Updating subtask with ID ${subtaskIdStr} with prompt "${prompt}" and research: ${useResearch}`
		);

		// Initialize the appropriate AI client based on research flag
		try {
			if (useResearch) {
				// Initialize Perplexity client
				await getPerplexityClientForMCP(session);
			} else {
				// Initialize Anthropic client
				await getAnthropicClientForMCP(session);
			}
		} catch (error) {
			log.error(`AI client initialization error: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'AI_CLIENT_ERROR',
					message: error.message || 'Failed to initialize AI client'
				},
				fromCache: false
			};
		}

		try {
			// Enable silent mode to prevent console logs from interfering with JSON response
			enableSilentMode();

			// Create a logger wrapper object to handle logging without breaking the mcpLog[level] calls
			// This ensures outputFormat is set to 'json' while still supporting proper logging
			const logWrapper = {
				info: (message) => log.info(message),
				warn: (message) => log.warn(message),
				error: (message) => log.error(message),
				debug: (message) => log.debug && log.debug(message),
				success: (message) => log.info(message) // Map success to info if needed
			};

			// Execute core updateSubtaskById function
			// Pass both session and logWrapper as mcpLog to ensure outputFormat is 'json'
			const updatedSubtask = await updateSubtaskById(
				tasksPath,
				subtaskIdStr,
				prompt,
				useResearch,
				{
					session,
					mcpLog: logWrapper
				}
			);

			// Restore normal logging
			disableSilentMode();

			// Handle the case where the subtask couldn't be updated (e.g., already marked as done)
			if (!updatedSubtask) {
				return {
					success: false,
					error: {
						code: 'SUBTASK_UPDATE_FAILED',
						message:
							'Failed to update subtask. It may be marked as completed, or another error occurred.'
					},
					fromCache: false
				};
			}

			// Return the updated subtask information
			return {
				success: true,
				data: {
					message: `Successfully updated subtask with ID ${subtaskIdStr}`,
					subtaskId: subtaskIdStr,
					parentId: subtaskIdStr.split('.')[0],
					subtask: updatedSubtask,
					tasksPath,
					useResearch
				},
				fromCache: false // This operation always modifies state and should never be cached
			};
		} catch (error) {
			// Make sure to restore normal logging even if there's an error
			disableSilentMode();
			throw error; // Rethrow to be caught by outer catch block
		}
	} catch (error) {
		// Ensure silent mode is disabled
		disableSilentMode();

		log.error(`Error updating subtask by ID: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'UPDATE_SUBTASK_ERROR',
				message: error.message || 'Unknown error updating subtask'
			},
			fromCache: false
		};
	}
}
@@ -4,184 +4,112 @@
 */

import { updateTaskById } from '../../../../scripts/modules/task-manager.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';
import {
	getAnthropicClientForMCP,
	getPerplexityClientForMCP
} from '../utils/ai-client-utils.js';

/**
 * Direct function wrapper for updateTaskById with error handling.
 *
 * @param {Object} args - Command arguments containing id, prompt, useResearch and tasksJsonPath.
 * @param {Object} log - Logger object.
 * @param {Object} context - Context object containing session data.
 * @returns {Promise<Object>} - Result object with success status and data/error information.
 */
export async function updateTaskByIdDirect(args, log, context = {}) {
	const { session } = context; // Only extract session, not reportProgress
	// Destructure expected args, including the resolved tasksJsonPath
	const { tasksJsonPath, id, prompt, research } = args;

	try {
		log.info(`Updating task with args: ${JSON.stringify(args)}`);

		// Check if tasksJsonPath was provided
		if (!tasksJsonPath) {
			const errorMessage = 'tasksJsonPath is required but was not provided.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_ARGUMENT', message: errorMessage },
				fromCache: false
			};
		}

		// Check required parameters (id and prompt)
		if (!id) {
			const errorMessage =
				'No task ID specified. Please provide a task ID to update.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_TASK_ID', message: errorMessage },
				fromCache: false
			};
		}

		if (!prompt) {
			const errorMessage =
				'No prompt specified. Please provide a prompt with new information for the task update.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_PROMPT', message: errorMessage },
				fromCache: false
			};
		}

		// Parse taskId - handle both string and number values
		let taskId;
		if (typeof id === 'string') {
			// Handle subtask IDs (e.g., "5.2")
			if (id.includes('.')) {
				taskId = id; // Keep as string for subtask IDs
			} else {
				// Parse as integer for main task IDs
				taskId = parseInt(id, 10);
				if (isNaN(taskId)) {
					const errorMessage = `Invalid task ID: ${id}. Task ID must be a positive integer or subtask ID (e.g., "5.2").`;
					log.error(errorMessage);
					return {
						success: false,
						error: { code: 'INVALID_TASK_ID', message: errorMessage },
						fromCache: false
					};
				}
			}
		} else {
			taskId = id;
		}

		// Use the provided path
		const tasksPath = tasksJsonPath;

		// Get research flag
		const useResearch = research === true;

		// Initialize appropriate AI client based on research flag
		let aiClient;
		try {
			if (useResearch) {
				log.info('Using Perplexity AI for research-backed task update');
				aiClient = await getPerplexityClientForMCP(session, log);
			} else {
				log.info('Using Claude AI for task update');
				aiClient = getAnthropicClientForMCP(session, log);
			}
		} catch (error) {
			log.error(`Failed to initialize AI client: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'AI_CLIENT_ERROR',
					message: `Cannot initialize AI client: ${error.message}`
				},
				fromCache: false
			};
		}

		log.info(
			`Updating task with ID ${taskId} with prompt "${prompt}" and research: ${useResearch}`
		);

		try {
			// Enable silent mode to prevent console logs from interfering with JSON response
			enableSilentMode();

			// Create a logger wrapper that matches what updateTaskById expects
			const logWrapper = {
				info: (message) => log.info(message),
				warn: (message) => log.warn(message),
				error: (message) => log.error(message),
				debug: (message) => log.debug && log.debug(message),
				success: (message) => log.info(message) // Map success to info since many loggers don't have success
			};

			// Execute core updateTaskById function with proper parameters
			await updateTaskById(
				tasksPath,
				taskId,
				prompt,
				useResearch,
				{
					mcpLog: logWrapper, // Use our wrapper object that has the expected method structure
					session
				},
				'json'
			);

			// Since updateTaskById doesn't return a value but modifies the tasks file,
			// we'll return a success message
			return {
				success: true,
				data: {
					message: `Successfully updated task with ID ${taskId} based on the prompt`,
|
|
||||||
taskId,
|
|
||||||
tasksPath: tasksPath, // Return the used path
|
|
||||||
useResearch
|
|
||||||
},
|
|
||||||
fromCache: false // This operation always modifies state and should never be cached
|
|
||||||
};
|
|
||||||
} catch (error) {
|
|
||||||
log.error(`Error updating task by ID: ${error.message}`);
|
|
||||||
return {
|
|
||||||
success: false,
|
|
||||||
error: {
|
|
||||||
code: 'UPDATE_TASK_ERROR',
|
|
||||||
message: error.message || 'Unknown error updating task'
|
|
||||||
},
|
|
||||||
fromCache: false
|
|
||||||
};
|
|
||||||
} finally {
|
|
||||||
// Make sure to restore normal logging even if there's an error
|
|
||||||
disableSilentMode();
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
// Ensure silent mode is disabled
|
|
||||||
disableSilentMode();
|
|
||||||
|
|
||||||
log.error(`Error updating task by ID: ${error.message}`);
|
|
||||||
return {
|
|
||||||
success: false,
|
|
||||||
error: {
|
|
||||||
code: 'UPDATE_TASK_ERROR',
|
|
||||||
message: error.message || 'Unknown error updating task'
|
|
||||||
},
|
|
||||||
fromCache: false
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
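For orientation, below is a minimal sketch of how an MCP tool handler might invoke updateTaskByIdDirect. The handler name, the client-facing argument names, and the resolveTasksPath helper are assumptions for illustration only; the contract actually shown above is that the direct function receives a pre-resolved tasksJsonPath and returns a { success, data | error, fromCache } envelope.

// Hypothetical MCP tool handler wiring client arguments into the direct function.
async function handleUpdateTaskTool(args, log, { session }) {
	const tasksJsonPath = resolveTasksPath(args.projectRoot); // assumed path-resolution helper
	const result = await updateTaskByIdDirect(
		{ tasksJsonPath, id: args.id, prompt: args.prompt, research: args.research },
		log,
		{ session }
	);
	if (!result.success) {
		log.error(`update_task failed: ${result.error.code} - ${result.error.message}`);
	}
	return result;
}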
@@ -4,184 +4,112 @@
 */

import { updateTasks } from '../../../../scripts/modules/task-manager.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';
import {
	getAnthropicClientForMCP,
	getPerplexityClientForMCP
} from '../utils/ai-client-utils.js';

/**
 * Direct function wrapper for updating tasks based on new context/prompt.
 *
 * @param {Object} args - Command arguments containing fromId, prompt, useResearch and tasksJsonPath.
 * @param {Object} log - Logger object.
 * @param {Object} context - Context object containing session data.
 * @returns {Promise<Object>} - Result object with success status and data/error information.
 */
export async function updateTasksDirect(args, log, context = {}) {
	const { session } = context; // Only extract session, not reportProgress
	const { tasksJsonPath, from, prompt, research } = args;

	try {
		log.info(`Updating tasks with args: ${JSON.stringify(args)}`);

		// Check if tasksJsonPath was provided
		if (!tasksJsonPath) {
			const errorMessage = 'tasksJsonPath is required but was not provided.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_ARGUMENT', message: errorMessage },
				fromCache: false
			};
		}

		// Check for the common mistake of using 'id' instead of 'from'
		if (args.id !== undefined && from === undefined) {
			const errorMessage =
				"You specified 'id' parameter but 'update' requires 'from' parameter. Use 'from' for this tool or use 'update_task' tool if you want to update a single task.";
			log.error(errorMessage);
			return {
				success: false,
				error: {
					code: 'PARAMETER_MISMATCH',
					message: errorMessage,
					suggestion:
						"Use 'from' parameter instead of 'id', or use the 'update_task' tool for single task updates"
				},
				fromCache: false
			};
		}

		// Check required parameters
		if (!from) {
			const errorMessage =
				'No from ID specified. Please provide a task ID to start updating from.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_FROM_ID', message: errorMessage },
				fromCache: false
			};
		}

		if (!prompt) {
			const errorMessage =
				'No prompt specified. Please provide a prompt with new context for task updates.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_PROMPT', message: errorMessage },
				fromCache: false
			};
		}

		// Parse fromId - handle both string and number values
		let fromId;
		if (typeof from === 'string') {
			fromId = parseInt(from, 10);
			if (isNaN(fromId)) {
				const errorMessage = `Invalid from ID: ${from}. Task ID must be a positive integer.`;
				log.error(errorMessage);
				return {
					success: false,
					error: { code: 'INVALID_FROM_ID', message: errorMessage },
					fromCache: false
				};
			}
		} else {
			fromId = from;
		}

		// Get research flag
		const useResearch = research === true;

		// Initialize appropriate AI client based on research flag
		let aiClient;
		try {
			if (useResearch) {
				log.info('Using Perplexity AI for research-backed task updates');
				aiClient = await getPerplexityClientForMCP(session, log);
			} else {
				log.info('Using Claude AI for task updates');
				aiClient = getAnthropicClientForMCP(session, log);
			}
		} catch (error) {
			log.error(`Failed to initialize AI client: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'AI_CLIENT_ERROR',
					message: `Cannot initialize AI client: ${error.message}`
				},
				fromCache: false
			};
		}

		log.info(
			`Updating tasks from ID ${fromId} with prompt "${prompt}" and research: ${useResearch}`
		);

		// Create the logger wrapper to ensure compatibility with core functions
		const logWrapper = {
			info: (message, ...args) => log.info(message, ...args),
			warn: (message, ...args) => log.warn(message, ...args),
			error: (message, ...args) => log.error(message, ...args),
			debug: (message, ...args) => log.debug && log.debug(message, ...args), // Handle optional debug
			success: (message, ...args) => log.info(message, ...args) // Map success to info if needed
		};

		try {
			// Enable silent mode to prevent console logs from interfering with JSON response
			enableSilentMode();

			// Execute core updateTasks function, passing the AI client and session
			await updateTasks(tasksJsonPath, fromId, prompt, useResearch, {
				mcpLog: logWrapper, // Pass the wrapper instead of the raw log object
				session
			});

			// Since updateTasks doesn't return a value but modifies the tasks file,
			// we'll return a success message
			return {
				success: true,
				data: {
					message: `Successfully updated tasks from ID ${fromId} based on the prompt`,
					fromId,
					tasksPath: tasksJsonPath,
					useResearch
				},
				fromCache: false // This operation always modifies state and should never be cached
			};
		} catch (error) {
			log.error(`Error updating tasks: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'UPDATE_TASKS_ERROR',
					message: error.message || 'Unknown error updating tasks'
				},
				fromCache: false
			};
		} finally {
			// Make sure to restore normal logging even if there's an error
			disableSilentMode();
		}
	} catch (error) {
		// Ensure silent mode is disabled
		disableSilentMode();

		log.error(`Error updating tasks: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'UPDATE_TASKS_ERROR',
				message: error.message || 'Unknown error updating tasks'
			},
			fromCache: false
		};
	}
}
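A sketch of calling updateTasksDirect and branching on the envelope it returns; the path and prompt literals are placeholders, not values from this repository.

const result = await updateTasksDirect(
	{ tasksJsonPath: '/path/to/tasks.json', from: 5, prompt: 'Switch the API layer to tRPC', research: false },
	log,
	{ session }
);
if (result.success) {
	log.info(result.data.message);
} else if (result.error.code === 'PARAMETER_MISMATCH') {
	// The caller passed 'id' instead of 'from'; surface the suggestion to the client.
	log.warn(result.error.suggestion);
} else {
	log.error(`${result.error.code}: ${result.error.message}`);
}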
@@ -3,78 +3,63 @@
 */

import { validateDependenciesCommand } from '../../../../scripts/modules/dependency-manager.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';
import fs from 'fs';

/**
 * Validate dependencies in tasks.json
 * @param {Object} args - Function arguments
 * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
 * @param {Object} log - Logger object
 * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
 */
export async function validateDependenciesDirect(args, log) {
	// Destructure the explicit tasksJsonPath
	const { tasksJsonPath } = args;

	if (!tasksJsonPath) {
		log.error('validateDependenciesDirect called without tasksJsonPath');
		return {
			success: false,
			error: {
				code: 'MISSING_ARGUMENT',
				message: 'tasksJsonPath is required'
			}
		};
	}

	try {
		log.info(`Validating dependencies in tasks: ${tasksJsonPath}`);

		// Use the provided tasksJsonPath
		const tasksPath = tasksJsonPath;

		// Verify the file exists
		if (!fs.existsSync(tasksPath)) {
			return {
				success: false,
				error: {
					code: 'FILE_NOT_FOUND',
					message: `Tasks file not found at ${tasksPath}`
				}
			};
		}

		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Call the original command function using the provided tasksPath
		await validateDependenciesCommand(tasksPath);

		// Restore normal logging
		disableSilentMode();

		return {
			success: true,
			data: {
				message: 'Dependencies validated successfully',
				tasksPath
			}
		};
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error validating dependencies: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'VALIDATION_ERROR',
				message: error.message
			}
		};
	}
}
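A short usage sketch for the validator; the path literal is a placeholder.

const validation = await validateDependenciesDirect(
	{ tasksJsonPath: '/workspace/my-project/tasks/tasks.json' },
	log
);
if (!validation.success && validation.error.code === 'FILE_NOT_FOUND') {
	// The tasks file has not been created in this project yet.
	log.warn(validation.error.message);
}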
@@ -28,71 +28,60 @@ import { fixDependenciesDirect } from './direct-functions/fix-dependencies.js';
import { complexityReportDirect } from './direct-functions/complexity-report.js';
import { addDependencyDirect } from './direct-functions/add-dependency.js';
import { removeTaskDirect } from './direct-functions/remove-task.js';
import { initializeProjectDirect } from './direct-functions/initialize-project-direct.js';

// Re-export utility functions
export { findTasksJsonPath } from './utils/path-utils.js';

// Re-export AI client utilities
export {
	getAnthropicClientForMCP,
	getPerplexityClientForMCP,
	getModelConfig,
	getBestAvailableAIModel,
	handleClaudeError
} from './utils/ai-client-utils.js';

// Use Map for potential future enhancements like introspection or dynamic dispatch
export const directFunctions = new Map([
	['listTasksDirect', listTasksDirect],
	['getCacheStatsDirect', getCacheStatsDirect],
	['parsePRDDirect', parsePRDDirect],
	['updateTasksDirect', updateTasksDirect],
	['updateTaskByIdDirect', updateTaskByIdDirect],
	['updateSubtaskByIdDirect', updateSubtaskByIdDirect],
	['generateTaskFilesDirect', generateTaskFilesDirect],
	['setTaskStatusDirect', setTaskStatusDirect],
	['showTaskDirect', showTaskDirect],
	['nextTaskDirect', nextTaskDirect],
	['expandTaskDirect', expandTaskDirect],
	['addTaskDirect', addTaskDirect],
	['addSubtaskDirect', addSubtaskDirect],
	['removeSubtaskDirect', removeSubtaskDirect],
	['analyzeTaskComplexityDirect', analyzeTaskComplexityDirect],
	['clearSubtasksDirect', clearSubtasksDirect],
	['expandAllTasksDirect', expandAllTasksDirect],
	['removeDependencyDirect', removeDependencyDirect],
	['validateDependenciesDirect', validateDependenciesDirect],
	['fixDependenciesDirect', fixDependenciesDirect],
	['complexityReportDirect', complexityReportDirect],
	['addDependencyDirect', addDependencyDirect],
	['removeTaskDirect', removeTaskDirect]
]);

// Re-export all direct function implementations
export {
	listTasksDirect,
	getCacheStatsDirect,
	parsePRDDirect,
	updateTasksDirect,
	updateTaskByIdDirect,
	updateSubtaskByIdDirect,
	generateTaskFilesDirect,
	setTaskStatusDirect,
	showTaskDirect,
	nextTaskDirect,
	expandTaskDirect,
	addTaskDirect,
	addSubtaskDirect,
	removeSubtaskDirect,
	analyzeTaskComplexityDirect,
	clearSubtasksDirect,
	expandAllTasksDirect,
	removeDependencyDirect,
	validateDependenciesDirect,
	fixDependenciesDirect,
	complexityReportDirect,
	addDependencyDirect,
	removeTaskDirect,
	initializeProjectDirect
};
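Because the direct functions are also collected in a Map, a caller can dispatch by name, which is what the comment above anticipates. A sketch, with the UNKNOWN_FUNCTION code as an illustrative choice rather than an existing error code:

// Hypothetical dynamic dispatch over the directFunctions registry.
async function runDirectFunction(name, args, log, context = {}) {
	const fn = directFunctions.get(name);
	if (!fn) {
		return {
			success: false,
			error: { code: 'UNKNOWN_FUNCTION', message: `No direct function named ${name}` },
			fromCache: false
		};
	}
	return fn(args, log, context);
}

// e.g. await runDirectFunction('showTaskDirect', { tasksJsonPath, id: '5' }, log, { session });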
@@ -11,9 +11,9 @@ dotenv.config();

// Default model configuration from CLI environment
const DEFAULT_MODEL_CONFIG = {
	model: 'claude-3-7-sonnet-20250219',
	maxTokens: 64000,
	temperature: 0.2
};

/**
@@ -24,28 +24,25 @@ const DEFAULT_MODEL_CONFIG = {
 * @throws {Error} If API key is missing
 */
export function getAnthropicClientForMCP(session, log = console) {
	try {
		// Extract API key from session.env or fall back to environment variables
		const apiKey =
			session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY;

		if (!apiKey) {
			throw new Error(
				'ANTHROPIC_API_KEY not found in session environment or process.env'
			);
		}

		// Initialize and return a new Anthropic client
		return new Anthropic({
			apiKey,
			defaultHeaders: {
				'anthropic-beta': 'output-128k-2025-02-19' // Include header for increased token limit
			}
		});
	} catch (error) {
		log.error(`Failed to initialize Anthropic client: ${error.message}`);
		throw error;
	}
}

/**
@@ -56,29 +53,26 @@ export function getAnthropicClientForMCP(session, log = console) {
 * @throws {Error} If API key is missing or OpenAI package can't be imported
 */
export async function getPerplexityClientForMCP(session, log = console) {
	try {
		// Extract API key from session.env or fall back to environment variables
		const apiKey =
			session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY;

		if (!apiKey) {
			throw new Error(
				'PERPLEXITY_API_KEY not found in session environment or process.env'
			);
		}

		// Dynamically import OpenAI (it may not be used in all contexts)
		const { default: OpenAI } = await import('openai');

		// Initialize and return a new OpenAI client configured for Perplexity
		return new OpenAI({
			apiKey,
			baseURL: 'https://api.perplexity.ai'
		});
	} catch (error) {
		log.error(`Failed to initialize Perplexity client: ${error.message}`);
		throw error;
	}
}

/**
@@ -88,12 +82,12 @@ export async function getPerplexityClientForMCP(session, log = console) {
 * @returns {Object} Model configuration with model, maxTokens, and temperature
 */
export function getModelConfig(session, defaults = DEFAULT_MODEL_CONFIG) {
	// Get values from session or fall back to defaults
	return {
		model: session?.env?.MODEL || defaults.model,
		maxTokens: parseInt(session?.env?.MAX_TOKENS || defaults.maxTokens),
		temperature: parseFloat(session?.env?.TEMPERATURE || defaults.temperature)
	};
}

/**
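The session override behaves as a straightforward fallback chain, so the expected results look like this (values shown are examples worked from the defaults above, not captured output):

// No session: the CLI defaults are returned unchanged.
getModelConfig(undefined);
// -> { model: 'claude-3-7-sonnet-20250219', maxTokens: 64000, temperature: 0.2 }

// Session env values win when present; numeric strings are coerced.
getModelConfig({ env: { MAX_TOKENS: '32000', TEMPERATURE: '0.5' } });
// -> { model: 'claude-3-7-sonnet-20250219', maxTokens: 32000, temperature: 0.5 }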
@@ -106,78 +100,59 @@ export function getModelConfig(session, defaults = DEFAULT_MODEL_CONFIG) {
 * @returns {Promise<Object>} Selected model info with type and client
 * @throws {Error} If no AI models are available
 */
export async function getBestAvailableAIModel(
	session,
	options = {},
	log = console
) {
	const { requiresResearch = false, claudeOverloaded = false } = options;

	// Test case: When research is needed but no Perplexity, use Claude
	if (
		requiresResearch &&
		!(session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY) &&
		(session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)
	) {
		try {
			log.warn('Perplexity not available for research, using Claude');
			const client = getAnthropicClientForMCP(session, log);
			return { type: 'claude', client };
		} catch (error) {
			log.error(`Claude not available: ${error.message}`);
			throw new Error('No AI models available for research');
		}
	}

	// Regular path: Perplexity for research when available
	if (
		requiresResearch &&
		(session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY)
	) {
		try {
			const client = await getPerplexityClientForMCP(session, log);
			return { type: 'perplexity', client };
		} catch (error) {
			log.warn(`Perplexity not available: ${error.message}`);
			// Fall through to Claude as backup
		}
	}

	// Test case: Claude for overloaded scenario
	if (
		claudeOverloaded &&
		(session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)
	) {
		try {
			log.warn(
				'Claude is overloaded but no alternatives are available. Proceeding with Claude anyway.'
			);
			const client = getAnthropicClientForMCP(session, log);
			return { type: 'claude', client };
		} catch (error) {
			log.error(
				`Claude not available despite being overloaded: ${error.message}`
			);
			throw new Error('No AI models available');
		}
	}

	// Default case: Use Claude when available and not overloaded
	if (
		!claudeOverloaded &&
		(session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)
	) {
		try {
			const client = getAnthropicClientForMCP(session, log);
			return { type: 'claude', client };
		} catch (error) {
			log.warn(`Claude not available: ${error.message}`);
			// Fall through to error if no other options
		}
	}

	// If we got here, no models were successfully initialized
	throw new Error('No AI models available. Please check your API keys.');
}

/**
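A sketch of calling the selector from a retry path; the surrounding retry logic is an assumption, not part of this file. Per the branches above, requiresResearch prefers Perplexity when its key is available, while claudeOverloaded only changes which branch (and warning) ends up selecting Claude when no alternative is configured.

let claudeOverloaded = false;
const { type, client } = await getBestAvailableAIModel(
	session,
	{ requiresResearch: args.research === true, claudeOverloaded },
	log
);
log.info(`Selected ${type} model client`);
// If the subsequent call fails with an overloaded_error, a caller can set
// claudeOverloaded = true and ask the selector again before retrying.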
@@ -186,28 +161,28 @@ export async function getBestAvailableAIModel(
 * @returns {string} User-friendly error message
 */
export function handleClaudeError(error) {
	// Check if it's a structured error response
	if (error.type === 'error' && error.error) {
		switch (error.error.type) {
			case 'overloaded_error':
				return 'Claude is currently experiencing high demand and is overloaded. Please wait a few minutes and try again.';
			case 'rate_limit_error':
				return 'You have exceeded the rate limit. Please wait a few minutes before making more requests.';
			case 'invalid_request_error':
				return 'There was an issue with the request format. If this persists, please report it as a bug.';
			default:
				return `Claude API error: ${error.error.message}`;
		}
	}

	// Check for network/timeout errors
	if (error.message?.toLowerCase().includes('timeout')) {
		return 'The request to Claude timed out. Please try again.';
	}
	if (error.message?.toLowerCase().includes('network')) {
		return 'There was a network error connecting to Claude. Please check your internet connection and try again.';
	}

	// Default error message
	return `Error communicating with Claude: ${error.message}`;
}
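handleClaudeError only maps an error to user-facing text; deciding whether to retry stays with the caller. A minimal sketch, where the anthropic client instance and requestPayload are assumed to exist at the call site:

try {
	await anthropic.messages.create(requestPayload); // assumed call site
} catch (error) {
	const friendly = handleClaudeError(error);
	const retryable =
		error?.error?.type === 'overloaded_error' ||
		error?.error?.type === 'rate_limit_error';
	log.error(friendly);
	if (retryable) {
		// Back off and retry, or re-select a model via getBestAvailableAIModel(..., { claudeOverloaded: true }).
	}
}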
@@ -1,247 +1,213 @@
import { v4 as uuidv4 } from 'uuid';

class AsyncOperationManager {
	constructor() {
		this.operations = new Map(); // Stores active operation state
		this.completedOperations = new Map(); // Stores completed operations
		this.maxCompletedOperations = 100; // Maximum number of completed operations to store
		this.listeners = new Map(); // For potential future notifications
	}

	/**
	 * Adds an operation to be executed asynchronously.
	 * @param {Function} operationFn - The async function to execute (e.g., a Direct function).
	 * @param {Object} args - Arguments to pass to the operationFn.
	 * @param {Object} context - The MCP tool context { log, reportProgress, session }.
	 * @returns {string} The unique ID assigned to this operation.
	 */
	addOperation(operationFn, args, context) {
		const operationId = `op-${uuidv4()}`;
		const operation = {
			id: operationId,
			status: 'pending',
			startTime: Date.now(),
			endTime: null,
			result: null,
			error: null,
			// Store necessary parts of context, especially log for background execution
			log: context.log,
			reportProgress: context.reportProgress, // Pass reportProgress through
			session: context.session // Pass session through if needed by the operationFn
		};
		this.operations.set(operationId, operation);
		this.log(operationId, 'info', `Operation added.`);

		// Start execution in the background (don't await here)
		this._runOperation(operationId, operationFn, args, context).catch((err) => {
			// Catch unexpected errors during the async execution setup itself
			this.log(
				operationId,
				'error',
				`Critical error starting operation: ${err.message}`,
				{ stack: err.stack }
			);
			operation.status = 'failed';
			operation.error = {
				code: 'MANAGER_EXECUTION_ERROR',
				message: err.message
			};
			operation.endTime = Date.now();

			// Move to completed operations
			this._moveToCompleted(operationId);
		});

		return operationId;
	}

	/**
	 * Internal function to execute the operation.
	 * @param {string} operationId - The ID of the operation.
	 * @param {Function} operationFn - The async function to execute.
	 * @param {Object} args - Arguments for the function.
	 * @param {Object} context - The original MCP tool context.
	 */
	async _runOperation(operationId, operationFn, args, context) {
		const operation = this.operations.get(operationId);
		if (!operation) return; // Should not happen

		operation.status = 'running';
		this.log(operationId, 'info', `Operation running.`);
		this.emit('statusChanged', { operationId, status: 'running' });

		try {
			// Pass the necessary context parts to the direct function
			// The direct function needs to be adapted if it needs reportProgress
			// We pass the original context's log, plus our wrapped reportProgress
			const result = await operationFn(args, operation.log, {
				reportProgress: (progress) =>
					this._handleProgress(operationId, progress),
				mcpLog: operation.log, // Pass log as mcpLog if direct fn expects it
				session: operation.session
			});

			operation.status = result.success ? 'completed' : 'failed';
			operation.result = result.success ? result.data : null;
			operation.error = result.success ? null : result.error;
			this.log(
				operationId,
				'info',
				`Operation finished with status: ${operation.status}`
			);
		} catch (error) {
			this.log(
				operationId,
				'error',
				`Operation failed with error: ${error.message}`,
				{ stack: error.stack }
			);
			operation.status = 'failed';
			operation.error = {
				code: 'OPERATION_EXECUTION_ERROR',
				message: error.message
			};
		} finally {
			operation.endTime = Date.now();
			this.emit('statusChanged', {
				operationId,
				status: operation.status,
				result: operation.result,
				error: operation.error
			});

			// Move to completed operations if done or failed
			if (operation.status === 'completed' || operation.status === 'failed') {
				this._moveToCompleted(operationId);
			}
		}
	}

	/**
	 * Move an operation from active operations to completed operations history.
	 * @param {string} operationId - The ID of the operation to move.
	 * @private
	 */
	_moveToCompleted(operationId) {
		const operation = this.operations.get(operationId);
		if (!operation) return;

		// Store only the necessary data in completed operations
		const completedData = {
			id: operation.id,
			status: operation.status,
			startTime: operation.startTime,
			endTime: operation.endTime,
			result: operation.result,
			error: operation.error
		};

		this.completedOperations.set(operationId, completedData);
		this.operations.delete(operationId);

		// Trim completed operations if exceeding maximum
		if (this.completedOperations.size > this.maxCompletedOperations) {
			// Get the oldest operation (sorted by endTime)
			const oldest = [...this.completedOperations.entries()].sort(
				(a, b) => a[1].endTime - b[1].endTime
			)[0];

			if (oldest) {
				this.completedOperations.delete(oldest[0]);
			}
		}
	}

	/**
	 * Handles progress updates from the running operation and forwards them.
	 * @param {string} operationId - The ID of the operation reporting progress.
	 * @param {Object} progress - The progress object { progress, total? }.
	 */
	_handleProgress(operationId, progress) {
		const operation = this.operations.get(operationId);
		if (operation && operation.reportProgress) {
			try {
				// Use the reportProgress function captured from the original context
				operation.reportProgress(progress);
				this.log(
					operationId,
					'debug',
					`Reported progress: ${JSON.stringify(progress)}`
				);
			} catch (err) {
				this.log(
					operationId,
					'warn',
					`Failed to report progress: ${err.message}`
				);
				// Don't stop the operation, just log the reporting failure
			}
		}
	}

	/**
	 * Retrieves the status and result/error of an operation.
	 * @param {string} operationId - The ID of the operation.
	 * @returns {Object | null} The operation details or null if not found.
	 */
	getStatus(operationId) {
		// First check active operations
		const operation = this.operations.get(operationId);
		if (operation) {
			return {
				id: operation.id,
				status: operation.status,
				startTime: operation.startTime,
				endTime: operation.endTime,
				result: operation.result,
				error: operation.error
			};
		}

		// Then check completed operations
		const completedOperation = this.completedOperations.get(operationId);
		if (completedOperation) {
			return completedOperation;
		}

		// Operation not found in either active or completed
		return {
			error: {
				code: 'OPERATION_NOT_FOUND',
				message: `Operation ID ${operationId} not found. It may have been completed and removed from history, or the ID may be invalid.`
			},
			status: 'not_found'
		};
	}

	/**
	 * Internal logging helper to prefix logs with the operation ID.
	 * @param {string} operationId - The ID of the operation.
	 * @param {'info'|'warn'|'error'|'debug'} level - Log level.
	 * @param {string} message - Log message.
	 * @param {Object} [meta] - Additional metadata.
	 */
	log(operationId, level, message, meta = {}) {
		const operation = this.operations.get(operationId);
		// Use the logger instance associated with the operation if available, otherwise console
		const logger = operation?.log || console;
		const logFn = logger[level] || logger.log || console.log; // Fallback
		logFn(`[AsyncOp ${operationId}] ${message}`, meta);
	}

	// --- Basic Event Emitter ---
	on(eventName, listener) {
		if (!this.listeners.has(eventName)) {
			this.listeners.set(eventName, []);
		}
		this.listeners.get(eventName).push(listener);
	}

	emit(eventName, data) {
		if (this.listeners.has(eventName)) {
			this.listeners.get(eventName).forEach((listener) => listener(data));
		}
	}
}

// Export a singleton instance
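A sketch of the intended flow from a tool handler: start a long-running direct function, hand the operation ID back to the client, and let it poll. The singleton import name follows the comment above, but the export statement itself falls outside this hunk, and the argument names are illustrative.

import { asyncOperationManager } from './async-operation-manager.js'; // assumed singleton export

// Inside an MCP tool's execute(args, { log, reportProgress, session }):
const operationId = asyncOperationManager.addOperation(
	expandTaskDirect,
	{ tasksJsonPath, id: args.id, num: args.num },
	{ log, reportProgress, session }
);
// Return the ID immediately; a status tool can later call:
const status = asyncOperationManager.getStatus(operationId);
// -> { id, status: 'pending' | 'running' | 'completed' | 'failed', result, error, ... }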
@@ -6,42 +6,38 @@
 * @returns {Promise<any>} The result of the actionFn.
 */
export async function withSessionEnv(sessionEnv, actionFn) {
	if (!sessionEnv || typeof sessionEnv !== 'object' || Object.keys(sessionEnv).length === 0) {
		// If no sessionEnv is provided, just run the action directly
		return await actionFn();
	}

	const originalEnv = {};
	const keysToRestore = [];

	// Set environment variables from sessionEnv
	for (const key in sessionEnv) {
		if (Object.prototype.hasOwnProperty.call(sessionEnv, key)) {
			// Store original value if it exists, otherwise mark for deletion
			if (process.env[key] !== undefined) {
				originalEnv[key] = process.env[key];
			}
			keysToRestore.push(key);
			process.env[key] = sessionEnv[key];
		}
	}

	try {
		// Execute the provided action function
		return await actionFn();
	} finally {
		// Restore original environment variables
		for (const key of keysToRestore) {
			if (Object.prototype.hasOwnProperty.call(originalEnv, key)) {
				process.env[key] = originalEnv[key];
			} else {
				// If the key didn't exist originally, delete it
				delete process.env[key];
			}
		}
	}
}
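A small usage sketch (not part of the diff) for withSessionEnv(): variables passed in the first argument are visible only while the action runs and are restored afterwards. The variable name below is just an example.

// Illustrative only: the override is visible inside the callback and undone afterwards.
const value = await withSessionEnv({ EXAMPLE_API_KEY: 'session-scoped-value' }, async () => {
	return process.env.EXAMPLE_API_KEY; // 'session-scoped-value'
});
// Here process.env.EXAMPLE_API_KEY is back to its original value (or deleted if it was unset).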
@@ -1,9 +1,9 @@
/**
 * path-utils.js
 * Utility functions for file path operations in Task Master
 *
 * This module provides robust path resolution for both:
 * 1. PACKAGE PATH: Where task-master code is installed
 *    (global node_modules OR local ./node_modules/task-master OR direct from repo)
 * 2. PROJECT PATH: Where user's tasks.json resides (typically user's project root)
 */

@@ -18,43 +18,43 @@ export let lastFoundProjectRoot = null;

// Project marker files that indicate a potential project root
export const PROJECT_MARKERS = [
	// Task Master specific
	'tasks.json',
	'tasks/tasks.json',

	// Common version control
	'.git',
	'.svn',

	// Common package files
	'package.json',
	'pyproject.toml',
	'Gemfile',
	'go.mod',
	'Cargo.toml',

	// Common IDE/editor folders
	'.cursor',
	'.vscode',
	'.idea',

	// Common dependency directories (check if directory)
	'node_modules',
	'venv',
	'.venv',

	// Common config files
	'.env',
	'.eslintrc',
	'tsconfig.json',
	'babel.config.js',
	'jest.config.js',
	'webpack.config.js',

	// Common CI/CD files
	'.github/workflows',
	'.gitlab-ci.yml',
	'.circleci/config.yml'
];

/**
@@ -63,15 +63,15 @@ export const PROJECT_MARKERS = [
 * @returns {string} - Absolute path to the package installation directory
 */
export function getPackagePath() {
	// When running from source, __dirname is the directory containing this file
	// When running from npm, we need to find the package root
	const thisFilePath = fileURLToPath(import.meta.url);
	const thisFileDir = path.dirname(thisFilePath);

	// Navigate from core/utils up to the package root
	// In dev: /path/to/task-master/mcp-server/src/core/utils -> /path/to/task-master
	// In npm: /path/to/node_modules/task-master/mcp-server/src/core/utils -> /path/to/node_modules/task-master
	return path.resolve(thisFileDir, '../../../../');
}

/**
@@ -82,73 +82,62 @@ export function getPackagePath() {
 * @throws {Error} - If tasks.json cannot be found.
 */
export function findTasksJsonPath(args, log) {
	// PRECEDENCE ORDER for finding tasks.json:
	// 1. Explicitly provided `projectRoot` in args (Highest priority, expected in MCP context)
	// 2. Previously found/cached `lastFoundProjectRoot` (primarily for CLI performance)
	// 3. Search upwards from current working directory (`process.cwd()`) - CLI usage

	// 1. If project root is explicitly provided (e.g., from MCP session), use it directly
	if (args.projectRoot) {
		const projectRoot = args.projectRoot;
		log.info(`Using explicitly provided project root: ${projectRoot}`);
		try {
			// This will throw if tasks.json isn't found within this root
			return findTasksJsonInDirectory(projectRoot, args.file, log);
		} catch (error) {
			// Include debug info in error
			const debugInfo = {
				projectRoot,
				currentDir: process.cwd(),
				serverDir: path.dirname(process.argv[1]),
				possibleProjectRoot: path.resolve(path.dirname(process.argv[1]), '../..'),
				lastFoundProjectRoot,
				searchedPaths: error.message
			};

			error.message = `Tasks file not found in any of the expected locations relative to project root "${projectRoot}" (from session).\nDebug Info: ${JSON.stringify(debugInfo, null, 2)}`;
			throw error;
		}
	}

	// --- Fallback logic primarily for CLI or when projectRoot isn't passed ---

	// 2. If we have a last known project root that worked, try it first
	if (lastFoundProjectRoot) {
		log.info(`Trying last known project root: ${lastFoundProjectRoot}`);
		try {
			// Use the cached root
			const tasksPath = findTasksJsonInDirectory(lastFoundProjectRoot, args.file, log);
			return tasksPath; // Return if found in cached root
		} catch (error) {
			log.info(`Task file not found in last known project root, continuing search.`);
			// Continue with search if not found in cache
		}
	}

	// 3. Start search from current directory (most common CLI scenario)
	const startDir = process.cwd();
	log.info(`Searching for tasks.json starting from current directory: ${startDir}`);

	// Try to find tasks.json by walking up the directory tree from cwd
	try {
		// This will throw if not found in the CWD tree
		return findTasksJsonWithParentSearch(startDir, args.file, log);
	} catch (error) {
		// If all attempts fail, augment and throw the original error from CWD search
		error.message = `${error.message}\n\nPossible solutions:\n1. Run the command from your project directory containing tasks.json\n2. Use --project-root=/path/to/project to specify the project location (if using CLI)\n3. Ensure the project root is correctly passed from the client (if using MCP)\n\nCurrent working directory: ${startDir}\nLast known project root: ${lastFoundProjectRoot}\nProject root from args: ${args.projectRoot}`;
		throw error;
	}
}

/**
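An illustrative call (not from the diff) showing the precedence described above: an explicit projectRoot wins, otherwise the cached root and then an upwards search from the current directory are tried. The logger object here is a stand-in for the MCP/CLI logger normally passed in.

// Illustrative only.
const log = { info: console.log, warn: console.warn, error: console.error };
const tasksPath = findTasksJsonPath(
	{ projectRoot: '/absolute/path/to/project', file: undefined },
	log
);
log.info(`Resolved tasks file: ${tasksPath}`);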
@@ -157,11 +146,11 @@ export function findTasksJsonPath(args, log) {
 * @returns {boolean} - True if the directory contains any project markers
 */
function hasProjectMarkers(dirPath) {
	return PROJECT_MARKERS.some((marker) => {
		const markerPath = path.join(dirPath, marker);
		// Check if the marker exists as either a file or directory
		return fs.existsSync(markerPath);
	});
}

/**
@@ -173,41 +162,35 @@ function hasProjectMarkers(dirPath) {
 * @throws {Error} - If tasks.json cannot be found
 */
function findTasksJsonInDirectory(dirPath, explicitFilePath, log) {
	const possiblePaths = [];

	// 1. If a file is explicitly provided relative to dirPath
	if (explicitFilePath) {
		possiblePaths.push(path.resolve(dirPath, explicitFilePath));
	}

	// 2. Check the standard locations relative to dirPath
	possiblePaths.push(
		path.join(dirPath, 'tasks.json'),
		path.join(dirPath, 'tasks', 'tasks.json')
	);

	log.info(`Checking potential task file paths: ${possiblePaths.join(', ')}`);

	// Find the first existing path
	for (const p of possiblePaths) {
		if (fs.existsSync(p)) {
			log.info(`Found tasks file at: ${p}`);
			// Store the project root for future use
			lastFoundProjectRoot = dirPath;
			return p;
		}
	}

	// If no file was found, throw an error
	const error = new Error(`Tasks file not found in any of the expected locations relative to ${dirPath}: ${possiblePaths.join(', ')}`);
	error.code = 'TASKS_FILE_NOT_FOUND';
	throw error;
}

/**
|
|||||||
* @throws {Error} - If tasks.json cannot be found in any parent directory
|
* @throws {Error} - If tasks.json cannot be found in any parent directory
|
||||||
*/
|
*/
|
||||||
function findTasksJsonWithParentSearch(startDir, explicitFilePath, log) {
|
function findTasksJsonWithParentSearch(startDir, explicitFilePath, log) {
|
||||||
let currentDir = startDir;
|
let currentDir = startDir;
|
||||||
const rootDir = path.parse(currentDir).root;
|
const rootDir = path.parse(currentDir).root;
|
||||||
|
|
||||||
// Keep traversing up until we hit the root directory
|
// Keep traversing up until we hit the root directory
|
||||||
while (currentDir !== rootDir) {
|
while (currentDir !== rootDir) {
|
||||||
// First check for tasks.json directly
|
// First check for tasks.json directly
|
||||||
try {
|
try {
|
||||||
return findTasksJsonInDirectory(currentDir, explicitFilePath, log);
|
return findTasksJsonInDirectory(currentDir, explicitFilePath, log);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
// If tasks.json not found but the directory has project markers,
|
// If tasks.json not found but the directory has project markers,
|
||||||
// log it as a potential project root (helpful for debugging)
|
// log it as a potential project root (helpful for debugging)
|
||||||
if (hasProjectMarkers(currentDir)) {
|
if (hasProjectMarkers(currentDir)) {
|
||||||
log.info(`Found project markers in ${currentDir}, but no tasks.json`);
|
log.info(`Found project markers in ${currentDir}, but no tasks.json`);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move up to parent directory
|
// Move up to parent directory
|
||||||
const parentDir = path.dirname(currentDir);
|
const parentDir = path.dirname(currentDir);
|
||||||
|
|
||||||
// Check if we've reached the root
|
// Check if we've reached the root
|
||||||
if (parentDir === currentDir) {
|
if (parentDir === currentDir) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
log.info(
|
log.info(`Tasks file not found in ${currentDir}, searching in parent directory: ${parentDir}`);
|
||||||
`Tasks file not found in ${currentDir}, searching in parent directory: ${parentDir}`
|
currentDir = parentDir;
|
||||||
);
|
}
|
||||||
currentDir = parentDir;
|
}
|
||||||
}
|
|
||||||
}
|
// If we've searched all the way to the root and found nothing
|
||||||
|
const error = new Error(`Tasks file not found in ${startDir} or any parent directory.`);
|
||||||
// If we've searched all the way to the root and found nothing
|
error.code = 'TASKS_FILE_NOT_FOUND';
|
||||||
const error = new Error(
|
throw error;
|
||||||
`Tasks file not found in ${startDir} or any parent directory.`
|
|
||||||
);
|
|
||||||
error.code = 'TASKS_FILE_NOT_FOUND';
|
|
||||||
throw error;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Note: findTasksWithNpmConsideration is not used by findTasksJsonPath and might be legacy or used elsewhere.
|
// Note: findTasksWithNpmConsideration is not used by findTasksJsonPath and might be legacy or used elsewhere.
|
||||||
// If confirmed unused, it could potentially be removed in a separate cleanup.
|
// If confirmed unused, it could potentially be removed in a separate cleanup.
|
||||||
function findTasksWithNpmConsideration(startDir, log) {
|
function findTasksWithNpmConsideration(startDir, log) {
|
||||||
// First try our recursive parent search from cwd
|
// First try our recursive parent search from cwd
|
||||||
try {
|
try {
|
||||||
return findTasksJsonWithParentSearch(startDir, null, log);
|
return findTasksJsonWithParentSearch(startDir, null, log);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
// If that fails, try looking relative to the executable location
|
// If that fails, try looking relative to the executable location
|
||||||
const execPath = process.argv[1];
|
const execPath = process.argv[1];
|
||||||
const execDir = path.dirname(execPath);
|
const execDir = path.dirname(execPath);
|
||||||
log.info(`Looking for tasks file relative to executable at: ${execDir}`);
|
log.info(`Looking for tasks file relative to executable at: ${execDir}`);
|
||||||
|
|
||||||
try {
|
try {
|
||||||
return findTasksJsonWithParentSearch(execDir, null, log);
|
return findTasksJsonWithParentSearch(execDir, null, log);
|
||||||
} catch (secondError) {
|
} catch (secondError) {
|
||||||
// If that also fails, check standard locations in user's home directory
|
// If that also fails, check standard locations in user's home directory
|
||||||
const homeDir = os.homedir();
|
const homeDir = os.homedir();
|
||||||
log.info(`Looking for tasks file in home directory: ${homeDir}`);
|
log.info(`Looking for tasks file in home directory: ${homeDir}`);
|
||||||
|
|
||||||
try {
|
try {
|
||||||
// Check standard locations in home dir
|
// Check standard locations in home dir
|
||||||
return findTasksJsonInDirectory(
|
return findTasksJsonInDirectory(path.join(homeDir, '.task-master'), null, log);
|
||||||
path.join(homeDir, '.task-master'),
|
} catch (thirdError) {
|
||||||
null,
|
// If all approaches fail, throw the original error
|
||||||
log
|
throw error;
|
||||||
);
|
}
|
||||||
} catch (thirdError) {
|
}
|
||||||
// If all approaches fail, throw the original error
|
}
|
||||||
throw error;
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Finds potential PRD document files based on common naming patterns
|
|
||||||
* @param {string} projectRoot - The project root directory
|
|
||||||
* @param {string|null} explicitPath - Optional explicit path provided by the user
|
|
||||||
* @param {Object} log - Logger object
|
|
||||||
* @returns {string|null} - The path to the first found PRD file, or null if none found
|
|
||||||
*/
|
|
||||||
export function findPRDDocumentPath(projectRoot, explicitPath, log) {
|
|
||||||
// If explicit path is provided, check if it exists
|
|
||||||
if (explicitPath) {
|
|
||||||
const fullPath = path.isAbsolute(explicitPath)
|
|
||||||
? explicitPath
|
|
||||||
: path.resolve(projectRoot, explicitPath);
|
|
||||||
|
|
||||||
if (fs.existsSync(fullPath)) {
|
|
||||||
log.info(`Using provided PRD document path: ${fullPath}`);
|
|
||||||
return fullPath;
|
|
||||||
} else {
|
|
||||||
log.warn(
|
|
||||||
`Provided PRD document path not found: ${fullPath}, will search for alternatives`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Common locations and file patterns for PRD documents
|
|
||||||
const commonLocations = [
|
|
||||||
'', // Project root
|
|
||||||
'scripts/'
|
|
||||||
];
|
|
||||||
|
|
||||||
const commonFileNames = ['PRD.md', 'prd.md', 'PRD.txt', 'prd.txt'];
|
|
||||||
|
|
||||||
// Check all possible combinations
|
|
||||||
for (const location of commonLocations) {
|
|
||||||
for (const fileName of commonFileNames) {
|
|
||||||
const potentialPath = path.join(projectRoot, location, fileName);
|
|
||||||
if (fs.existsSync(potentialPath)) {
|
|
||||||
log.info(`Found PRD document at: ${potentialPath}`);
|
|
||||||
return potentialPath;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
log.warn(`No PRD document found in common locations within ${projectRoot}`);
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Resolves the tasks output directory path
|
|
||||||
* @param {string} projectRoot - The project root directory
|
|
||||||
* @param {string|null} explicitPath - Optional explicit output path provided by the user
|
|
||||||
* @param {Object} log - Logger object
|
|
||||||
* @returns {string} - The resolved tasks directory path
|
|
||||||
*/
|
|
||||||
export function resolveTasksOutputPath(projectRoot, explicitPath, log) {
|
|
||||||
// If explicit path is provided, use it
|
|
||||||
if (explicitPath) {
|
|
||||||
const outputPath = path.isAbsolute(explicitPath)
|
|
||||||
? explicitPath
|
|
||||||
: path.resolve(projectRoot, explicitPath);
|
|
||||||
|
|
||||||
log.info(`Using provided tasks output path: ${outputPath}`);
|
|
||||||
return outputPath;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Default output path: tasks/tasks.json in the project root
|
|
||||||
const defaultPath = path.resolve(projectRoot, 'tasks', 'tasks.json');
|
|
||||||
log.info(`Using default tasks output path: ${defaultPath}`);
|
|
||||||
|
|
||||||
// Ensure the directory exists
|
|
||||||
const outputDir = path.dirname(defaultPath);
|
|
||||||
if (!fs.existsSync(outputDir)) {
|
|
||||||
log.info(`Creating tasks directory: ${outputDir}`);
|
|
||||||
fs.mkdirSync(outputDir, { recursive: true });
|
|
||||||
}
|
|
||||||
|
|
||||||
return defaultPath;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Resolves various file paths needed for MCP operations based on project root
|
|
||||||
* @param {string} projectRoot - The project root directory
|
|
||||||
* @param {Object} args - Command arguments that may contain explicit paths
|
|
||||||
* @param {Object} log - Logger object
|
|
||||||
* @returns {Object} - An object containing resolved paths
|
|
||||||
*/
|
|
||||||
export function resolveProjectPaths(projectRoot, args, log) {
|
|
||||||
const prdPath = findPRDDocumentPath(projectRoot, args.input, log);
|
|
||||||
const tasksJsonPath = resolveTasksOutputPath(projectRoot, args.output, log);
|
|
||||||
|
|
||||||
// You can add more path resolutions here as needed
|
|
||||||
|
|
||||||
return {
|
|
||||||
projectRoot,
|
|
||||||
prdPath,
|
|
||||||
tasksJsonPath
|
|
||||||
// Add additional path properties as needed
|
|
||||||
};
|
|
||||||
}
|
|
||||||
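An illustrative call (not from the diff) for the v0.12.1-side helpers above; the paths are placeholders.

// Illustrative only.
const log = { info: console.log, warn: console.warn, error: console.error };
const { prdPath, tasksJsonPath } = resolveProjectPaths(
	'/absolute/path/to/project',
	{ input: 'scripts/prd.txt', output: undefined },
	log
);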
@@ -1,10 +1,10 @@
import { FastMCP } from "fastmcp";
import path from "path";
import dotenv from "dotenv";
import { fileURLToPath } from "url";
import fs from "fs";
import logger from "./logger.js";
import { registerTaskMasterTools } from "./tools/index.js";
import { asyncOperationManager } from './core/utils/async-manager.js';

// Load environment variables
@@ -18,74 +18,73 @@ const __dirname = path.dirname(__filename);
 * Main MCP server class that integrates with Task Master
 */
class TaskMasterMCPServer {
	constructor() {
		// Get version from package.json using synchronous fs
		const packagePath = path.join(__dirname, "../../package.json");
		const packageJson = JSON.parse(fs.readFileSync(packagePath, "utf8"));

		this.options = {
			name: "Task Master MCP Server",
			version: packageJson.version,
		};

		this.server = new FastMCP(this.options);
		this.initialized = false;

		this.server.addResource({});

		this.server.addResourceTemplate({});

		// Make the manager accessible (e.g., pass it to tool registration)
		this.asyncManager = asyncOperationManager;

		// Bind methods
		this.init = this.init.bind(this);
		this.start = this.start.bind(this);
		this.stop = this.stop.bind(this);

		// Setup logging
		this.logger = logger;
	}

	/**
	 * Initialize the MCP server with necessary tools and routes
	 */
	async init() {
		if (this.initialized) return;

		// Pass the manager instance to the tool registration function
		registerTaskMasterTools(this.server, this.asyncManager);

		this.initialized = true;

		return this;
	}

	/**
	 * Start the MCP server
	 */
	async start() {
		if (!this.initialized) {
			await this.init();
		}

		// Start the FastMCP server
		await this.server.start({
			transportType: "stdio",
		});

		return this;
	}

	/**
	 * Stop the MCP server
	 */
	async stop() {
		if (this.server) {
			await this.server.stop();
		}
	}
}

// Export the manager from here as well, if needed elsewhere
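A hypothetical bootstrap sketch showing how the class above is typically wired up; the actual entry point script is not part of this excerpt.

// Illustrative only.
const server = new TaskMasterMCPServer();

server
	.start()
	.then(() => server.logger.info('Task Master MCP Server started on stdio'))
	.catch((error) => {
		server.logger.error(`Failed to start MCP server: ${error.message}`);
		process.exit(1);
	});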
@@ -1,19 +1,18 @@
import chalk from "chalk";

// Define log levels
const LOG_LEVELS = {
	debug: 0,
	info: 1,
	warn: 2,
	error: 3,
	success: 4,
};

// Get log level from environment or default to info
const LOG_LEVEL = process.env.LOG_LEVEL
	? LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()] ?? LOG_LEVELS.info
	: LOG_LEVELS.info;

/**
 * Logs a message with the specified level
@@ -21,66 +20,51 @@ const LOG_LEVEL = process.env.LOG_LEVEL
 * @param {...any} args - Arguments to log
 */
function log(level, ...args) {
	// Use text prefixes instead of emojis
	const prefixes = {
		debug: chalk.gray("[DEBUG]"),
		info: chalk.blue("[INFO]"),
		warn: chalk.yellow("[WARN]"),
		error: chalk.red("[ERROR]"),
		success: chalk.green("[SUCCESS]"),
	};

	if (LOG_LEVELS[level] !== undefined && LOG_LEVELS[level] >= LOG_LEVEL) {
		const prefix = prefixes[level] || "";
		let coloredArgs = args;

		try {
			switch(level) {
				case "error":
					coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.red(arg) : arg);
					break;
				case "warn":
					coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.yellow(arg) : arg);
					break;
				case "success":
					coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.green(arg) : arg);
					break;
				case "info":
					coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.blue(arg) : arg);
					break;
				case "debug":
					coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.gray(arg) : arg);
					break;
				// default: use original args (no color)
			}
		} catch (colorError) {
			// Fallback if chalk fails on an argument
			// Use console.error here for internal logger errors, separate from normal logging
			console.error("Internal Logger Error applying chalk color:", colorError);
			coloredArgs = args;
		}

		// Revert to console.log - FastMCP's context logger (context.log)
		// is responsible for directing logs correctly (e.g., to stderr)
		// during tool execution without upsetting the client connection.
		// Logs outside of tool execution (like startup) will go to stdout.
		console.log(prefix, ...coloredArgs);
	}
}

/**
@@ -88,19 +72,16 @@ function log(level, ...args) {
 * @returns {Object} Logger object with info, error, debug, warn, and success methods
 */
export function createLogger() {
	const createLogMethod = (level) => (...args) => log(level, ...args);

	return {
		debug: createLogMethod("debug"),
		info: createLogMethod("info"),
		warn: createLogMethod("warn"),
		error: createLogMethod("error"),
		success: createLogMethod("success"),
		log: log, // Also expose the raw log function
	};
}

// Export a default logger instance
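A usage sketch (not part of the diff) for the logger module above; the invocation shown in the comment is hypothetical.

// Illustrative only. Verbosity is controlled by LOG_LEVEL, read at module load:
//   LOG_LEVEL=debug node mcp-server/server.js   (hypothetical invocation)
const log = createLogger();
log.info('Server starting');             // shown at the default 'info' level
log.debug('Loaded tasks from disk');     // shown only when LOG_LEVEL=debug
log.error('Failed to parse tasks.json'); // always shown at the default level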
@@ -3,95 +3,63 @@
 * Tool for adding a dependency to a task
 */

import { z } from "zod";
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from "./utils.js";
import { addDependencyDirect } from "../core/task-master-core.js";

/**
 * Register the addDependency tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerAddDependencyTool(server) {
	server.addTool({
		name: "add_dependency",
		description: "Add a dependency relationship between two tasks",
		parameters: z.object({
			id: z.string().describe("ID of task that will depend on another task"),
			dependsOn: z.string().describe("ID of task that will become a dependency"),
			file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
			projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Adding dependency for task ${args.id} to depend on ${args.dependsOn}`);
				reportProgress({ progress: 0 });

				// Get project root using the utility function
				let rootFolder = getProjectRootFromSession(session, log);

				// Fallback to args.projectRoot if session didn't provide one
				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				// Call the direct function with the resolved rootFolder
				const result = await addDependencyDirect({
					projectRoot: rootFolder,
					...args
				}, log, { reportProgress, mcpLog: log, session});

				reportProgress({ progress: 100 });

				// Log result
				if (result.success) {
					log.info(`Successfully added dependency: ${result.data.message}`);
				} else {
					log.error(`Failed to add dependency: ${result.error.message}`);
				}

				// Use handleApiResult to format the response
				return handleApiResult(result, log, 'Error adding dependency');
			} catch (error) {
				log.error(`Error in addDependency tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		},
	});
}
@@ -3,116 +3,61 @@
 * Tool for adding subtasks to existing tasks
 */

import { z } from "zod";
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from "./utils.js";
import { addSubtaskDirect } from "../core/task-master-core.js";

/**
 * Register the addSubtask tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerAddSubtaskTool(server) {
	server.addTool({
		name: "add_subtask",
		description: "Add a subtask to an existing task",
		parameters: z.object({
			id: z.string().describe("Parent task ID (required)"),
			taskId: z.string().optional().describe("Existing task ID to convert to subtask"),
			title: z.string().optional().describe("Title for the new subtask (when creating a new subtask)"),
			description: z.string().optional().describe("Description for the new subtask"),
			details: z.string().optional().describe("Implementation details for the new subtask"),
			status: z.string().optional().describe("Status for the new subtask (default: 'pending')"),
			dependencies: z.string().optional().describe("Comma-separated list of dependency IDs for the new subtask"),
			file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
			skipGenerate: z.boolean().optional().describe("Skip regenerating task files"),
			projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Adding subtask with args: ${JSON.stringify(args)}`);

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await addSubtaskDirect({
					projectRoot: rootFolder,
					...args
				}, log, { reportProgress, mcpLog: log, session});

				if (result.success) {
					log.info(`Subtask added successfully: ${result.data.message}`);
				} else {
					log.error(`Failed to add subtask: ${result.error.message}`);
				}

				return handleApiResult(result, log, 'Error adding subtask');
			} catch (error) {
				log.error(`Error in addSubtask tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		},
	});
}
@@ -3,118 +3,64 @@
 * Tool to add a new task using AI
 */

import { z } from "zod";
import {
	handleApiResult,
	createErrorResponse,
	createContentResponse,
	getProjectRootFromSession
} from "./utils.js";
import { addTaskDirect } from "../core/task-master-core.js";

/**
 * Register the add-task tool with the MCP server
 * @param {Object} server - FastMCP server instance
 * @param {AsyncOperationManager} asyncManager - The async operation manager instance.
 */
export function registerAddTaskTool(server, asyncManager) {
	server.addTool({
		name: "add_task",
		description: "Starts adding a new task using AI in the background.",
		parameters: z.object({
			prompt: z.string().describe("Description of the task to add"),
			dependencies: z.string().optional().describe("Comma-separated list of task IDs this task depends on"),
			priority: z.string().optional().describe("Task priority (high, medium, low)"),
			file: z.string().optional().describe("Path to the tasks file"),
			projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
		}),
		execute: async (args, context) => {
			const { log, reportProgress, session } = context;
			try {
				log.info(`MCP add_task request received with prompt: \"${args.prompt}\"`);

				if (!args.prompt) {
					return createErrorResponse("Prompt is required for add_task.", "VALIDATION_ERROR");
				}

				let rootFolder = getProjectRootFromSession(session, log);
				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const directArgs = {
					projectRoot: rootFolder,
					...args
				};

				const operationId = asyncManager.addOperation(addTaskDirect, directArgs, context);

				log.info(`Started background operation for add_task. Operation ID: ${operationId}`);

				return createContentResponse({
					message: "Add task operation started successfully.",
					operationId: operationId
				});

			} catch (error) {
				log.error(`Error initiating add_task operation: ${error.message}`, { stack: error.stack });
				return createErrorResponse(`Failed to start add task operation: ${error.message}`, "ADD_TASK_INIT_ERROR");
			}
		}
	});
}
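Once add_task returns an operationId, the server-side async manager shown earlier can be queried for the outcome. This is an illustrative sketch; the 'completed' state and the shape of status.result are assumed rather than shown in this excerpt.

// Illustrative only: operationId is the value returned by the add_task tool above.
const status = asyncOperationManager.getStatus(operationId);
if (status.status === 'completed') {
	log.info(`Task creation finished: ${JSON.stringify(status.result)}`);
} else if (status.error) {
	log.error(`Task creation failed or not found: ${status.error.message}`);
} else {
	log.info(`Operation ${operationId} is still running (status: ${status.status}).`);
}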
@@ -3,121 +3,61 @@
 * Tool for analyzing task complexity and generating recommendations
 */

import { z } from "zod";
import {
  handleApiResult,
  createErrorResponse,
  getProjectRootFromSession
} from "./utils.js";
import { analyzeTaskComplexityDirect } from "../core/task-master-core.js";

/**
 * Register the analyze tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerAnalyzeTool(server) {
  server.addTool({
    name: "analyze_project_complexity",
    description: "Analyze task complexity and generate expansion recommendations",
    parameters: z.object({
      output: z.string().optional().describe("Output file path for the report (default: scripts/task-complexity-report.json)"),
      model: z.string().optional().describe("LLM model to use for analysis (defaults to configured model)"),
      threshold: z.union([z.number(), z.string()]).optional().describe("Minimum complexity score to recommend expansion (1-10) (default: 5)"),
      file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
      research: z.boolean().optional().describe("Use Perplexity AI for research-backed complexity analysis"),
      projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
    }),
    execute: async (args, { log, session, reportProgress }) => {
      try {
        log.info(`Analyzing task complexity with args: ${JSON.stringify(args)}`);
        // await reportProgress({ progress: 0 });

        let rootFolder = getProjectRootFromSession(session, log);

        if (!rootFolder && args.projectRoot) {
          rootFolder = args.projectRoot;
          log.info(`Using project root from args as fallback: ${rootFolder}`);
        }

        const result = await analyzeTaskComplexityDirect({
          projectRoot: rootFolder,
          ...args
        }, log/*, { reportProgress, mcpLog: log, session}*/);

        // await reportProgress({ progress: 100 });

        if (result.success) {
          log.info(`Task complexity analysis complete: ${result.data.message}`);
          log.info(`Report summary: ${JSON.stringify(result.data.reportSummary)}`);
        } else {
          log.error(`Failed to analyze task complexity: ${result.error.message}`);
        }

        return handleApiResult(result, log, 'Error analyzing task complexity');
      } catch (error) {
        log.error(`Error in analyze tool: ${error.message}`);
        return createErrorResponse(error.message);
      }
    },
  });
}
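The `threshold` parameter above now accepts either a number or a numeric string via `z.union([z.number(), z.string()])`, so MCP clients that serialize every argument as a string still pass validation. A minimal standalone sketch of how that union behaves (illustrative, not code from this repo):

```js
import { z } from "zod";

// Mirrors the union used in analyze_project_complexity above.
const threshold = z.union([z.number(), z.string()]).optional();

threshold.parse(7);                // -> 7
threshold.parse("7");              // -> "7" (kept as a string; coercion is left to downstream code)
threshold.safeParse(true).success; // -> false, booleans are rejected
```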
@@ -3,96 +3,61 @@
 * Tool for clearing subtasks from parent tasks
 */

import { z } from "zod";
import {
  handleApiResult,
  createErrorResponse,
  getProjectRootFromSession
} from "./utils.js";
import { clearSubtasksDirect } from "../core/task-master-core.js";

/**
 * Register the clearSubtasks tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerClearSubtasksTool(server) {
  server.addTool({
    name: "clear_subtasks",
    description: "Clear subtasks from specified tasks",
    parameters: z.object({
      id: z.string().optional().describe("Task IDs (comma-separated) to clear subtasks from"),
      all: z.boolean().optional().describe("Clear subtasks from all tasks"),
      file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
      projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
    }).refine(data => data.id || data.all, {
      message: "Either 'id' or 'all' parameter must be provided",
      path: ["id", "all"]
    }),
    execute: async (args, { log, session, reportProgress }) => {
      try {
        log.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);
        await reportProgress({ progress: 0 });

        let rootFolder = getProjectRootFromSession(session, log);

        if (!rootFolder && args.projectRoot) {
          rootFolder = args.projectRoot;
          log.info(`Using project root from args as fallback: ${rootFolder}`);
        }

        const result = await clearSubtasksDirect({
          projectRoot: rootFolder,
          ...args
        }, log, { reportProgress, mcpLog: log, session});

        reportProgress({ progress: 100 });

        if (result.success) {
          log.info(`Subtasks cleared successfully: ${result.data.message}`);
        } else {
          log.error(`Failed to clear subtasks: ${result.error.message}`);
        }

        return handleApiResult(result, log, 'Error clearing subtasks');
      } catch (error) {
        log.error(`Error in clearSubtasks tool: ${error.message}`);
        return createErrorResponse(error.message);
      }
    },
  });
}
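The `.refine()` call on the `clear_subtasks` schema is what enforces that at least one of `id` or `all` is supplied, since neither field is individually required. A small standalone sketch of that behavior, using the same shape as the schema above:

```js
import { z } from "zod";

const params = z.object({
  id: z.string().optional(),
  all: z.boolean().optional()
}).refine(data => data.id || data.all, {
  message: "Either 'id' or 'all' parameter must be provided",
  path: ["id", "all"]
});

params.safeParse({ id: "5,6" }).success; // -> true
params.safeParse({ all: true }).success; // -> true
params.safeParse({}).success;            // -> false, the refine check fails
```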
@@ -3,87 +3,56 @@
 * Tool for displaying the complexity analysis report
 */

import { z } from "zod";
import {
  handleApiResult,
  createErrorResponse,
  getProjectRootFromSession
} from "./utils.js";
import { complexityReportDirect } from "../core/task-master-core.js";

/**
 * Register the complexityReport tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerComplexityReportTool(server) {
  server.addTool({
    name: "complexity_report",
    description: "Display the complexity analysis report in a readable format",
    parameters: z.object({
      file: z.string().optional().describe("Path to the report file (default: scripts/task-complexity-report.json)"),
      projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
    }),
    execute: async (args, { log, session, reportProgress }) => {
      try {
        log.info(`Getting complexity report with args: ${JSON.stringify(args)}`);
        // await reportProgress({ progress: 0 });

        let rootFolder = getProjectRootFromSession(session, log);

        if (!rootFolder && args.projectRoot) {
          rootFolder = args.projectRoot;
          log.info(`Using project root from args as fallback: ${rootFolder}`);
        }

        const result = await complexityReportDirect({
          projectRoot: rootFolder,
          ...args
        }, log/*, { reportProgress, mcpLog: log, session}*/);

        // await reportProgress({ progress: 100 });

        if (result.success) {
          log.info(`Successfully retrieved complexity report${result.fromCache ? ' (from cache)' : ''}`);
        } else {
          log.error(`Failed to retrieve complexity report: ${result.error.message}`);
        }

        return handleApiResult(result, log, 'Error retrieving complexity report');
      } catch (error) {
        log.error(`Error in complexity-report tool: ${error.message}`);
        return createErrorResponse(`Failed to retrieve complexity report: ${error.message}`);
      }
    },
  });
}
@@ -3,110 +3,60 @@
 * Tool for expanding all pending tasks with subtasks
 */

import { z } from "zod";
import {
  handleApiResult,
  createErrorResponse,
  getProjectRootFromSession
} from "./utils.js";
import { expandAllTasksDirect } from "../core/task-master-core.js";

/**
 * Register the expandAll tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerExpandAllTool(server) {
  server.addTool({
    name: "expand_all",
    description: "Expand all pending tasks into subtasks",
    parameters: z.object({
      num: z.union([z.number(), z.string()]).optional().describe("Number of subtasks to generate for each task"),
      research: z.boolean().optional().describe("Enable Perplexity AI for research-backed subtask generation"),
      prompt: z.string().optional().describe("Additional context to guide subtask generation"),
      force: z.boolean().optional().describe("Force regeneration of subtasks for tasks that already have them"),
      file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
      projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
    }),
    execute: async (args, { log, session, reportProgress }) => {
      try {
        log.info(`Expanding all tasks with args: ${JSON.stringify(args)}`);
        // await reportProgress({ progress: 0 });

        let rootFolder = getProjectRootFromSession(session, log);

        if (!rootFolder && args.projectRoot) {
          rootFolder = args.projectRoot;
          log.info(`Using project root from args as fallback: ${rootFolder}`);
        }

        const result = await expandAllTasksDirect({
          projectRoot: rootFolder,
          ...args
        }, log/*, { reportProgress, mcpLog: log, session}*/);

        // await reportProgress({ progress: 100 });

        if (result.success) {
          log.info(`Successfully expanded all tasks: ${result.data.message}`);
        } else {
          log.error(`Failed to expand all tasks: ${result.error?.message || 'Unknown error'}`);
        }

        return handleApiResult(result, log, 'Error expanding all tasks');
      } catch (error) {
        log.error(`Error in expand-all tool: ${error.message}`);
        return createErrorResponse(error.message);
      }
    },
  });
}
@@ -3,96 +3,66 @@
 * Tool to expand a task into subtasks
 */

import { z } from "zod";
import {
  handleApiResult,
  createErrorResponse,
  getProjectRootFromSession
} from "./utils.js";
import { expandTaskDirect } from "../core/task-master-core.js";

/**
 * Register the expand-task tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerExpandTaskTool(server) {
  server.addTool({
    name: "expand_task",
    description: "Expand a task into subtasks for detailed implementation",
    parameters: z.object({
      id: z.string().describe("ID of task to expand"),
      num: z.union([z.number(), z.string()]).optional().describe("Number of subtasks to generate"),
      research: z.boolean().optional().describe("Use Perplexity AI for research-backed generation"),
      prompt: z.string().optional().describe("Additional context for subtask generation"),
      force: z.boolean().optional().describe("Force regeneration even for tasks that already have subtasks"),
      file: z.string().optional().describe("Path to the tasks file"),
      projectRoot: z
        .string()
        .optional()
        .describe(
          "Root directory of the project (default: current working directory)"
        ),
    }),
    execute: async (args, { log, session, reportProgress }) => {
      try {
        log.info(`Expanding task with args: ${JSON.stringify(args)}`);
        // await reportProgress({ progress: 0 });

        let rootFolder = getProjectRootFromSession(session, log);

        if (!rootFolder && args.projectRoot) {
          rootFolder = args.projectRoot;
          log.info(`Using project root from args as fallback: ${rootFolder}`);
        }

        const result = await expandTaskDirect({
          projectRoot: rootFolder,
          ...args
        }, log/*, { reportProgress, mcpLog: log, session}*/);

        // await reportProgress({ progress: 100 });

        if (result.success) {
          log.info(`Successfully expanded task with ID ${args.id}`);
        } else {
          log.error(`Failed to expand task: ${result.error?.message || 'Unknown error'}`);
        }

        return handleApiResult(result, log, 'Error expanding task');
      } catch (error) {
        log.error(`Error in expand task tool: ${error.message}`);
        return createErrorResponse(error.message);
      }
    },
  });
}
@@ -3,74 +3,56 @@
 * Tool for automatically fixing invalid task dependencies
 */

import { z } from "zod";
import {
  handleApiResult,
  createErrorResponse,
  getProjectRootFromSession
} from "./utils.js";
import { fixDependenciesDirect } from "../core/task-master-core.js";

/**
 * Register the fixDependencies tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerFixDependenciesTool(server) {
  server.addTool({
    name: "fix_dependencies",
    description: "Fix invalid dependencies in tasks automatically",
    parameters: z.object({
      file: z.string().optional().describe("Path to the tasks file"),
      projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
    }),
    execute: async (args, { log, session, reportProgress }) => {
      try {
        log.info(`Fixing dependencies with args: ${JSON.stringify(args)}`);
        await reportProgress({ progress: 0 });

        let rootFolder = getProjectRootFromSession(session, log);

        if (!rootFolder && args.projectRoot) {
          rootFolder = args.projectRoot;
          log.info(`Using project root from args as fallback: ${rootFolder}`);
        }

        const result = await fixDependenciesDirect({
          projectRoot: rootFolder,
          ...args
        }, log, { reportProgress, mcpLog: log, session});

        await reportProgress({ progress: 100 });

        if (result.success) {
          log.info(`Successfully fixed dependencies: ${result.data.message}`);
        } else {
          log.error(`Failed to fix dependencies: ${result.error.message}`);
        }

        return handleApiResult(result, log, 'Error fixing dependencies');
      } catch (error) {
        log.error(`Error in fixDependencies tool: ${error.message}`);
        return createErrorResponse(error.message);
      }
    }
  });
}
@@ -3,92 +3,62 @@
 * Tool to generate individual task files from tasks.json
 */

import { z } from "zod";
import {
  handleApiResult,
  createErrorResponse,
  getProjectRootFromSession
} from "./utils.js";
import { generateTaskFilesDirect } from "../core/task-master-core.js";

/**
 * Register the generate tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerGenerateTool(server) {
  server.addTool({
    name: "generate",
    description: "Generates individual task files in tasks/ directory based on tasks.json",
    parameters: z.object({
      file: z.string().optional().describe("Path to the tasks file"),
      output: z.string().optional().describe("Output directory (default: same directory as tasks file)"),
      projectRoot: z
        .string()
        .optional()
        .describe(
          "Root directory of the project (default: current working directory)"
        ),
    }),
    execute: async (args, { log, session, reportProgress }) => {
      try {
        log.info(`Generating task files with args: ${JSON.stringify(args)}`);
        // await reportProgress({ progress: 0 });

        let rootFolder = getProjectRootFromSession(session, log);

        if (!rootFolder && args.projectRoot) {
          rootFolder = args.projectRoot;
          log.info(`Using project root from args as fallback: ${rootFolder}`);
        }

        const result = await generateTaskFilesDirect({
          projectRoot: rootFolder,
          ...args
        }, log/*, { reportProgress, mcpLog: log, session}*/);

        // await reportProgress({ progress: 100 });

        if (result.success) {
          log.info(`Successfully generated task files: ${result.data.message}`);
        } else {
          log.error(`Failed to generate task files: ${result.error?.message || 'Unknown error'}`);
        }

        return handleApiResult(result, log, 'Error generating task files');
      } catch (error) {
        log.error(`Error in generate tool: ${error.message}`);
        return createErrorResponse(error.message);
      }
    },
  });
}
@@ -8,40 +8,35 @@ import { createErrorResponse, createContentResponse } from './utils.js'; // Assu
 * @param {AsyncOperationManager} asyncManager - The async operation manager.
 */
export function registerGetOperationStatusTool(server, asyncManager) {
  server.addTool({
    name: 'get_operation_status',
    description: 'Retrieves the status and result/error of a background operation.',
    parameters: z.object({
      operationId: z.string().describe('The ID of the operation to check.'),
    }),
    execute: async (args, { log }) => {
      try {
        const { operationId } = args;
        log.info(`Checking status for operation ID: ${operationId}`);

        const status = asyncManager.getStatus(operationId);

        // Status will now always return an object, but it might have status='not_found'
        if (status.status === 'not_found') {
          log.warn(`Operation ID not found: ${operationId}`);
          return createErrorResponse(
            status.error?.message || `Operation ID not found: ${operationId}`,
            status.error?.code || 'OPERATION_NOT_FOUND'
          );
        }

        log.info(`Status for ${operationId}: ${status.status}`);
        return createContentResponse(status);
      } catch (error) {
        log.error(`Error in get_operation_status tool: ${error.message}`, { stack: error.stack });
        return createErrorResponse(`Failed to get operation status: ${error.message}`, 'GET_STATUS_ERROR');
      }
    },
  });
}
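Together with the asynchronous `add_task` flow above (which returns an `operationId` immediately), this tool lets a client poll until the background work settles. The sketch below is illustrative only: the `callTool` helper and the exact status strings are assumptions, not part of this codebase.

```js
// Hypothetical client-side polling loop; adapt to your MCP client's API.
async function addTaskAndWait(callTool, prompt) {
  const started = await callTool("add_task", { prompt });
  const { operationId } = started; // returned by add_task when the operation starts

  for (;;) {
    const status = await callTool("get_operation_status", { operationId });
    if (status.status !== "pending" && status.status !== "running") {
      return status; // settled – carries the result or error details
    }
    await new Promise((resolve) => setTimeout(resolve, 1000)); // wait before polling again
  }
}
```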
@@ -3,14 +3,13 @@
 * Tool to get task details by ID
 */

import { z } from "zod";
import {
  handleApiResult,
  createErrorResponse,
  getProjectRootFromSession
} from "./utils.js";
import { showTaskDirect } from "../core/task-master-core.js";

/**
 * Custom processor function that removes allTasks from the response
@@ -18,16 +17,16 @@ import { findTasksJsonPath } from '../core/utils/path-utils.js';
 * @returns {Object} - The processed data with allTasks removed
 */
function processTaskResponse(data) {
  if (!data) return data;

  // If we have the expected structure with task and allTasks
  if (data.task) {
    // Return only the task object, removing the allTasks array
    return data.task;
  }

  // If structure is unexpected, return as is
  return data;
}

/**
@@ -35,89 +34,59 @@ function processTaskResponse(data) {
 * @param {Object} server - FastMCP server instance
 */
export function registerShowTaskTool(server) {
  server.addTool({
    name: "get_task",
    description: "Get detailed information about a specific task",
    parameters: z.object({
      id: z.string().describe("Task ID to get"),
      file: z.string().optional().describe("Path to the tasks file"),
      projectRoot: z
        .string()
        .optional()
        .describe(
          "Root directory of the project (default: current working directory)"
        ),
    }),
    execute: async (args, { log, session, reportProgress }) => {
      // Log the session right at the start of execute
      log.info(`Session object received in execute: ${JSON.stringify(session)}`); // Use JSON.stringify for better visibility

      try {
        log.info(`Getting task details for ID: ${args.id}`);

        log.info(`Session object received in execute: ${JSON.stringify(session)}`); // Use JSON.stringify for better visibility

        let rootFolder = getProjectRootFromSession(session, log);

        if (!rootFolder && args.projectRoot) {
          rootFolder = args.projectRoot;
          log.info(`Using project root from args as fallback: ${rootFolder}`);
        } else if (!rootFolder) {
          // Ensure we always have *some* root, even if session failed and args didn't provide one
          rootFolder = process.cwd();
          log.warn(`Session and args failed to provide root, using CWD: ${rootFolder}`);
        }

        log.info(`Attempting to use project root: ${rootFolder}`); // Log the final resolved root

        log.info(`Root folder: ${rootFolder}`); // Log the final resolved root
        const result = await showTaskDirect({
          projectRoot: rootFolder,
          ...args
        }, log);

        if (result.success) {
          log.info(`Successfully retrieved task details for ID: ${args.id}${result.fromCache ? ' (from cache)' : ''}`);
        } else {
          log.error(`Failed to get task: ${result.error.message}`);
        }

        // Use our custom processor function to remove allTasks from the response
        return handleApiResult(result, log, 'Error retrieving task details', processTaskResponse);
      } catch (error) {
        log.error(`Error in get-task tool: ${error.message}\n${error.stack}`); // Add stack trace
        return createErrorResponse(`Failed to get task: ${error.message}`);
      }
    },
  });
}
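`handleApiResult` accepts an optional processor, and `get_task` passes `processTaskResponse` so the `allTasks` array never reaches the client. A quick illustration of what the processor (defined above) does; the sample objects here are invented for the example:

```js
const sample = {
  task: { id: 5, title: "Wire up MCP server", status: "pending" },
  allTasks: [/* every task in tasks.json – large and redundant for a single-task request */]
};

processTaskResponse(sample);
// -> { id: 5, title: "Wire up MCP server", status: "pending" }

processTaskResponse({ message: "unexpected shape" });
// -> returned unchanged when there is no `task` property
```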
@@ -3,94 +3,63 @@
 * Tool to get all tasks from Task Master
 */

import { z } from "zod";
import {
  createErrorResponse,
  handleApiResult,
  getProjectRootFromSession
} from "./utils.js";
import { listTasksDirect } from "../core/task-master-core.js";

/**
 * Register the getTasks tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerListTasksTool(server) {
  server.addTool({
    name: "get_tasks",
    description: "Get all tasks from Task Master, optionally filtering by status and including subtasks.",
    parameters: z.object({
      status: z.string().optional().describe("Filter tasks by status (e.g., 'pending', 'done')"),
      withSubtasks: z
        .boolean()
        .optional()
        .describe("Include subtasks nested within their parent tasks in the response"),
      file: z.string().optional().describe("Path to the tasks file (relative to project root or absolute)"),
      projectRoot: z
        .string()
        .optional()
        .describe(
          "Root directory of the project (default: automatically detected from session or CWD)"
        ),
    }),
    execute: async (args, { log, session, reportProgress }) => {
      try {
        log.info(`Getting tasks with filters: ${JSON.stringify(args)}`);
        // await reportProgress({ progress: 0 });

        let rootFolder = getProjectRootFromSession(session, log);

        if (!rootFolder && args.projectRoot) {
          rootFolder = args.projectRoot;
          log.info(`Using project root from args as fallback: ${rootFolder}`);
        }

        const result = await listTasksDirect({
          projectRoot: rootFolder,
          ...args
        }, log/*, { reportProgress, mcpLog: log, session}*/);

        // await reportProgress({ progress: 100 });

        log.info(`Retrieved ${result.success ? (result.data?.tasks?.length || 0) : 0} tasks${result.fromCache ? ' (from cache)' : ''}`);
        return handleApiResult(result, log, 'Error getting tasks');
      } catch (error) {
        log.error(`Error getting tasks: ${error.message}`);
        return createErrorResponse(error.message);
      }
    },
  });
}

// We no longer need the formatTasksResponse function as we're returning raw JSON data
@@ -3,69 +3,73 @@
|
|||||||
* Export all Task Master CLI tools for MCP server
|
* Export all Task Master CLI tools for MCP server
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { registerListTasksTool } from './get-tasks.js';
|
import { registerListTasksTool } from "./get-tasks.js";
|
||||||
import logger from '../logger.js';
|
import logger from "../logger.js";
|
||||||
import { registerSetTaskStatusTool } from './set-task-status.js';
|
import { registerSetTaskStatusTool } from "./set-task-status.js";
|
||||||
import { registerParsePRDTool } from './parse-prd.js';
|
import { registerParsePRDTool } from "./parse-prd.js";
|
||||||
import { registerUpdateTool } from './update.js';
|
import { registerUpdateTool } from "./update.js";
|
||||||
import { registerUpdateTaskTool } from './update-task.js';
|
import { registerUpdateTaskTool } from "./update-task.js";
|
||||||
import { registerUpdateSubtaskTool } from './update-subtask.js';
|
import { registerUpdateSubtaskTool } from "./update-subtask.js";
|
||||||
import { registerGenerateTool } from './generate.js';
|
import { registerGenerateTool } from "./generate.js";
|
||||||
import { registerShowTaskTool } from './get-task.js';
|
import { registerShowTaskTool } from "./get-task.js";
|
||||||
import { registerNextTaskTool } from './next-task.js';
|
import { registerNextTaskTool } from "./next-task.js";
|
||||||
import { registerExpandTaskTool } from './expand-task.js';
|
import { registerExpandTaskTool } from "./expand-task.js";
|
||||||
import { registerAddTaskTool } from './add-task.js';
|
import { registerAddTaskTool } from "./add-task.js";
|
||||||
import { registerAddSubtaskTool } from './add-subtask.js';
|
import { registerAddSubtaskTool } from "./add-subtask.js";
|
||||||
import { registerRemoveSubtaskTool } from './remove-subtask.js';
|
import { registerRemoveSubtaskTool } from "./remove-subtask.js";
|
||||||
import { registerAnalyzeTool } from './analyze.js';
|
import { registerAnalyzeTool } from "./analyze.js";
|
||||||
import { registerClearSubtasksTool } from "./clear-subtasks.js";
import { registerExpandAllTool } from "./expand-all.js";
import { registerRemoveDependencyTool } from "./remove-dependency.js";
import { registerValidateDependenciesTool } from "./validate-dependencies.js";
import { registerFixDependenciesTool } from "./fix-dependencies.js";
import { registerComplexityReportTool } from "./complexity-report.js";
import { registerAddDependencyTool } from "./add-dependency.js";
import { registerRemoveTaskTool } from './remove-task.js';
import { registerInitializeProjectTool } from './initialize-project.js';
import { asyncOperationManager } from '../core/utils/async-manager.js';
import { registerGetOperationStatusTool } from './get-operation-status.js';

/**
 * Register all Task Master tools with the MCP server
 * @param {Object} server - FastMCP server instance
 * @param {asyncOperationManager} asyncManager - The async operation manager instance
 */
export function registerTaskMasterTools(server, asyncManager) {
	try {
		// Register each tool
		registerListTasksTool(server);
		registerSetTaskStatusTool(server);
		registerParsePRDTool(server);
		registerUpdateTool(server);
		registerUpdateTaskTool(server);
		registerUpdateSubtaskTool(server);
		registerGenerateTool(server);
		registerShowTaskTool(server);
		registerNextTaskTool(server);
		registerExpandTaskTool(server);
		registerAddTaskTool(server, asyncManager);
		registerAddSubtaskTool(server);
		registerRemoveSubtaskTool(server);
		registerAnalyzeTool(server);
		registerClearSubtasksTool(server);
		registerExpandAllTool(server);
		registerRemoveDependencyTool(server);
		registerValidateDependenciesTool(server);
		registerFixDependenciesTool(server);
		registerComplexityReportTool(server);
		registerAddDependencyTool(server);
		registerRemoveTaskTool(server);
		registerInitializeProjectTool(server);
		registerGetOperationStatusTool(server, asyncManager);
	} catch (error) {
		logger.error(`Error registering Task Master tools: ${error.message}`);
		throw error;
	}

	logger.info('Registered Task Master MCP tools');
}

export default {
	registerTaskMasterTools,
};
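For context, a minimal sketch of how this registration module might be wired into a server entry point. The FastMCP constructor options, import paths, and startup call below are assumptions for illustration; they are not part of this diff.

import { FastMCP } from 'fastmcp';
import { registerTaskMasterTools } from './tools/index.js'; // assumed path to the module above
import { asyncOperationManager } from './core/utils/async-manager.js'; // assumed path

// Server name/version are placeholders; the real entry point defines its own.
const server = new FastMCP({ name: 'Task Master MCP', version: '0.0.0' });
registerTaskMasterTools(server, asyncOperationManager);
// server.start(...) would follow in the real entry point.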
@@ -1,68 +1,62 @@
import { z } from "zod";
import { execSync } from 'child_process';
import { createContentResponse, createErrorResponse } from "./utils.js"; // Only need response creators

export function registerInitializeProjectTool(server) {
	server.addTool({
		name: "initialize_project", // snake_case for tool name
		description: "Initializes a new Task Master project structure in the current working directory by running 'task-master init'.",
		parameters: z.object({
			projectName: z.string().optional().describe("The name for the new project."),
			projectDescription: z.string().optional().describe("A brief description for the project."),
			projectVersion: z.string().optional().describe("The initial version for the project (e.g., '0.1.0')."),
			authorName: z.string().optional().describe("The author's name."),
			skipInstall: z.boolean().optional().default(false).describe("Skip installing dependencies automatically."),
			addAliases: z.boolean().optional().default(false).describe("Add shell aliases (tm, taskmaster) to shell config file."),
			yes: z.boolean().optional().default(false).describe("Skip prompts and use default values or provided arguments."),
			// projectRoot is not needed here as 'init' works on the current directory
		}),
		execute: async (args, { log }) => { // Destructure context to get log
			try {
				log.info(`Executing initialize_project with args: ${JSON.stringify(args)}`);

				// Construct the command arguments carefully
				// Using npx ensures it uses the locally installed version if available, or fetches it
				let command = 'npx task-master init';
				const cliArgs = [];
				if (args.projectName) cliArgs.push(`--name "${args.projectName.replace(/"/g, '\\"')}"`); // Escape quotes
				if (args.projectDescription) cliArgs.push(`--description "${args.projectDescription.replace(/"/g, '\\"')}"`);
				if (args.projectVersion) cliArgs.push(`--version "${args.projectVersion.replace(/"/g, '\\"')}"`);
				if (args.authorName) cliArgs.push(`--author "${args.authorName.replace(/"/g, '\\"')}"`);
				if (args.skipInstall) cliArgs.push('--skip-install');
				if (args.addAliases) cliArgs.push('--aliases');
				if (args.yes) cliArgs.push('--yes');

				command += ' ' + cliArgs.join(' ');

				log.info(`Constructed command: ${command}`);

				// Execute the command in the current working directory of the server process
				// Capture stdout/stderr. Use a reasonable timeout (e.g., 5 minutes)
				const output = execSync(command, { encoding: 'utf8', stdio: 'pipe', timeout: 300000 });

				log.info(`Initialization output:\n${output}`);

				// Return a standard success response manually
				return createContentResponse(
					"Project initialized successfully.",
					{ output: output } // Include output in the data payload
				);
			} catch (error) {
				// Catch errors from execSync or timeouts
				const errorMessage = `Project initialization failed: ${error.message}`;
				const errorDetails = error.stderr?.toString() || error.stdout?.toString() || error.message; // Provide stderr/stdout if available
				log.error(`${errorMessage}\nDetails: ${errorDetails}`);

				// Return a standard error response manually
				return createErrorResponse(errorMessage, { details: errorDetails });
			}
		}
	});
}
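As a quick illustration of the flag assembly above (not part of this diff), a standalone helper that mirrors the same escaping and flag rules can be used to sanity-check the generated command. The helper name and sample values are made up.

// Mirrors the command construction in the execute handler above; illustrative only.
function buildInitCommand(args) {
	const cliArgs = [];
	if (args.projectName) cliArgs.push(`--name "${args.projectName.replace(/"/g, '\\"')}"`);
	if (args.skipInstall) cliArgs.push('--skip-install');
	if (args.addAliases) cliArgs.push('--aliases');
	if (args.yes) cliArgs.push('--yes');
	return ['npx task-master init', ...cliArgs].join(' ');
}

console.log(buildInitCommand({ projectName: 'My "demo" app', yes: true }));
// npx task-master init --name "My \"demo\" app" --yes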
@@ -3,83 +3,61 @@
 * Tool to find the next task to work on
 */

import { z } from "zod";
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from "./utils.js";
import { nextTaskDirect } from "../core/task-master-core.js";

/**
 * Register the next-task tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerNextTaskTool(server) {
	server.addTool({
		name: "next_task",
		description: "Find the next task to work on based on dependencies and status",
		parameters: z.object({
			file: z.string().optional().describe("Path to the tasks file"),
			projectRoot: z
				.string()
				.optional()
				.describe(
					"Root directory of the project (default: current working directory)"
				),
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Finding next task with args: ${JSON.stringify(args)}`);
				// await reportProgress({ progress: 0 });

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await nextTaskDirect({
					projectRoot: rootFolder,
					...args
				}, log/*, { reportProgress, mcpLog: log, session}*/);

				// await reportProgress({ progress: 100 });

				if (result.success) {
					log.info(`Successfully found next task: ${result.data?.task?.id || 'No available tasks'}`);
				} else {
					log.error(`Failed to find next task: ${result.error?.message || 'Unknown error'}`);
				}

				return handleApiResult(result, log, 'Error finding next task');
			} catch (error) {
				log.error(`Error in nextTask tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		},
	});
}
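For readers unfamiliar with the result envelope that handleApiResult consumes, here is a sketch inferred from the property accesses above (success, data?.task?.id, error?.message). The extra fields on the task object are invented for illustration.

// Shape inferred from the accesses above; field names beyond `id` are assumptions.
const successResult = {
	success: true,
	data: { task: { id: 12, title: 'Example next task' } }
};

const failureResult = {
	success: false,
	error: { message: 'Tasks file not found' }
};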
@@ -3,115 +3,63 @@
 * Tool to parse PRD document and generate tasks
 */

import { z } from "zod";
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from "./utils.js";
import { parsePRDDirect } from "../core/task-master-core.js";

/**
 * Register the parsePRD tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerParsePRDTool(server) {
	server.addTool({
		name: "parse_prd",
		description: "Parse a Product Requirements Document (PRD) or text file to automatically generate initial tasks.",
		parameters: z.object({
			input: z.string().default("tasks/tasks.json").describe("Path to the PRD document file (relative to project root or absolute)"),
			numTasks: z.string().optional().describe("Approximate number of top-level tasks to generate (default: 10)"),
			output: z.string().optional().describe("Output path for tasks.json file (relative to project root or absolute, default: tasks/tasks.json)"),
			force: z.boolean().optional().describe("Allow overwriting an existing tasks.json file."),
			projectRoot: z
				.string()
				.optional()
				.describe(
					"Root directory of the project (default: automatically detected from session or CWD)"
				),
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Parsing PRD with args: ${JSON.stringify(args)}`);

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await parsePRDDirect({
					projectRoot: rootFolder,
					...args
				}, log/*, { reportProgress, mcpLog: log, session}*/);

				// await reportProgress({ progress: 100 });

				if (result.success) {
					log.info(`Successfully parsed PRD: ${result.data.message}`);
				} else {
					log.error(`Failed to parse PRD: ${result.error?.message || 'Unknown error'}`);
				}

				return handleApiResult(result, log, 'Error parsing PRD');
			} catch (error) {
				log.error(`Error in parse-prd tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		},
	});
}
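An illustrative argument object for the parse_prd tool as declared above; the paths, count, and project root are placeholders rather than values taken from this repository.

// Matches the zod schema above; all values are placeholders.
const parsePrdArgs = {
	input: 'scripts/prd.txt',        // path to the PRD document to parse
	numTasks: '12',                  // declared as a string in the schema above
	output: 'tasks/tasks.json',      // where the generated tasks should be written
	force: true,                     // allow overwriting an existing tasks.json
	projectRoot: '/path/to/project'  // optional; the session root is used when omitted
};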
@@ -3,89 +3,58 @@
 * Tool for removing a dependency from a task
 */

import { z } from "zod";
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from "./utils.js";
import { removeDependencyDirect } from "../core/task-master-core.js";

/**
 * Register the removeDependency tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerRemoveDependencyTool(server) {
	server.addTool({
		name: "remove_dependency",
		description: "Remove a dependency from a task",
		parameters: z.object({
			id: z.string().describe("Task ID to remove dependency from"),
			dependsOn: z.string().describe("Task ID to remove as a dependency"),
			file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
			projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Removing dependency for task ${args.id} from ${args.dependsOn} with args: ${JSON.stringify(args)}`);
				// await reportProgress({ progress: 0 });

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await removeDependencyDirect({
					projectRoot: rootFolder,
					...args
				}, log/*, { reportProgress, mcpLog: log, session}*/);

				// await reportProgress({ progress: 100 });

				if (result.success) {
					log.info(`Successfully removed dependency: ${result.data.message}`);
				} else {
					log.error(`Failed to remove dependency: ${result.error.message}`);
				}

				return handleApiResult(result, log, 'Error removing dependency');
			} catch (error) {
				log.error(`Error in removeDependency tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		},
	});
}
@@ -3,101 +3,59 @@
 * Tool for removing subtasks from parent tasks
 */

import { z } from "zod";
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from "./utils.js";
import { removeSubtaskDirect } from "../core/task-master-core.js";

/**
 * Register the removeSubtask tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerRemoveSubtaskTool(server) {
	server.addTool({
		name: "remove_subtask",
		description: "Remove a subtask from its parent task",
		parameters: z.object({
			id: z.string().describe("Subtask ID to remove in format 'parentId.subtaskId' (required)"),
			convert: z.boolean().optional().describe("Convert the subtask to a standalone task instead of deleting it"),
			file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
			skipGenerate: z.boolean().optional().describe("Skip regenerating task files"),
			projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Removing subtask with args: ${JSON.stringify(args)}`);
				// await reportProgress({ progress: 0 });

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await removeSubtaskDirect({
					projectRoot: rootFolder,
					...args
				}, log/*, { reportProgress, mcpLog: log, session}*/);

				// await reportProgress({ progress: 100 });

				if (result.success) {
					log.info(`Subtask removed successfully: ${result.data.message}`);
				} else {
					log.error(`Failed to remove subtask: ${result.error.message}`);
				}

				return handleApiResult(result, log, 'Error removing subtask');
			} catch (error) {
				log.error(`Error in removeSubtask tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		},
	});
}
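Illustrative arguments for the remove_subtask tool, based on the parameter descriptions above; the IDs are made up.

// '5.2' means subtask 2 of parent task 5, per the id description above.
const convertArgs = { id: '5.2', convert: true };       // keep it as a standalone task
const deleteArgs = { id: '5.3', skipGenerate: true };   // delete it and skip regenerating task files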
Some files were not shown because too many files have changed in this diff.