Compare commits
58 commits · v0.10.2 · crunchyman
| SHA1 |
|---|
| b2ac3a4ef4 |
| b2396fd8fe |
| a99b2b20b3 |
| 4136ef5679 |
| a56a3628b3 |
| 9dc5e75760 |
| 16f4d4b932 |
| 7fef5ab488 |
| 38e416ef33 |
| aa185b28b2 |
| 76618187f6 |
| 757fd478d2 |
| 6e6407f683 |
| 80f933cd82 |
| 2c3986c097 |
| 7086a77625 |
| 66ca02242d |
| c211818303 |
| 728ba254c9 |
| 42585519d3 |
| cd4f4e66d7 |
| a5370ebdb7 |
| 1094f50014 |
| 4604f96a92 |
| e70f44b6fb |
| 57f655affd |
| 38cd889ccd |
| 18ea4dd4a8 |
| ba651645b0 |
| 42cac13f2c |
| 257160a967 |
| 9fd42eeafd |
| eeae027d2b |
| a10470ba2f |
| 397c181202 |
| 179eb85a4c |
| fb336ded3e |
| 5ec3651e64 |
| b41946c485 |
| a8b055f05d |
| a8e4bb0407 |
| 0663ff1bea |
| 3582798293 |
| 37c5b83f60 |
| f3e6f0b70e |
| eafdb47418 |
| 44db895303 |
| 472b517e22 |
| 1d807541ae |
| 08d3f2db26 |
| cea14b55b3 |
| 707618ca5d |
| 1abcf69ecd |
| 751854d5ee |
| 444fb24022 |
| bc4e68118c |
| a9392c8a0d |
| f49684a802 |

.changeset/config.json
@@ -7,7 +7,7 @@
  "commit": false,
  "fixed": [],
  "linked": [],
- "access": "restricted",
+ "access": "public",
  "baseBranch": "main",
  "updateInternalDependencies": "patch",
  "ignore": []

@@ -2,4 +2,4 @@
 "task-master-ai": patch
 ---

-Added changeset config #39
+Add CI for testing

@@ -1,5 +0,0 @@
----
-"task-master-ai": minor
----
-
-add github actions to automate github and npm releases

@@ -1,5 +0,0 @@
----
-"task-master-ai": minor
----
-
-Implement MCP server for all commands using tools.

.changeset/red-lights-mix.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Fix github actions creating npm releases on next branch push

.cursor/mcp.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
  "mcpServers": {
    "taskmaster-ai": {
      "command": "node",
      "args": ["./mcp-server/server.js"]
    }
  }
}

.cursor/rules/architecture.mdc
@@ -12,7 +12,7 @@ alwaysApply: false

- **[`commands.js`](mdc:scripts/modules/commands.js): Command Handling**
  - **Purpose**: Defines and registers all CLI commands using Commander.js.
-  - **Responsibilities**:
+  - **Responsibilities** (See also: [`commands.mdc`](mdc:.cursor/rules/commands.mdc)):
    - Parses command-line arguments and options.
    - Invokes appropriate functions from other modules to execute commands.
    - Handles user input and output related to command execution.
@@ -86,7 +86,7 @@ alwaysApply: false

- **[`utils.js`](mdc:scripts/modules/utils.js): Utility Functions and Configuration**
  - **Purpose**: Provides reusable utility functions and global configuration settings used across the application.
-  - **Responsibilities**:
+  - **Responsibilities** (See also: [`utilities.mdc`](mdc:.cursor/rules/utilities.mdc)):
    - Manages global configuration settings loaded from environment variables and defaults.
    - Implements logging utility with different log levels and output formatting.
    - Provides file system operation utilities (read/write JSON files).
@@ -101,6 +101,19 @@ alwaysApply: false
    - `formatTaskId(id)` / `findTaskById(tasks, taskId)`: Task ID and search utilities.
    - `findCycles(subtaskId, dependencyMap)`: Cycle detection algorithm (a sketch follows this section).

- **[`mcp-server/`](mdc:mcp-server/): MCP Server Integration**
  - **Purpose**: Provides an MCP (Model Context Protocol) interface for Task Master, allowing integration with external tools like Cursor. Uses the FastMCP framework.
  - **Responsibilities** (See also: [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc)):
    - Registers Task Master functionalities as tools consumable via MCP.
    - Handles MCP requests and translates them into calls to the Task Master core logic.
    - Prefers direct function calls to core modules via [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js) for performance.
    - Uses CLI execution via `executeTaskMasterCommand` as a fallback.
    - **Implements Caching**: Utilizes a caching layer (`ContextManager` with `lru-cache`) invoked via `getCachedOrExecute` within direct function wrappers ([`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js)) to optimize performance for specific read operations (e.g., listing tasks).
    - Standardizes response formatting for MCP clients using utilities in [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js).
  - **Key Components**:
    - `mcp-server/src/server.js`: Main server setup and initialization.
    - `mcp-server/src/tools/`: Directory containing individual tool definitions, each registering a specific Task Master command for MCP.

- **Data Flow and Module Dependencies**:

  - **Commands Initiate Actions**: User commands entered via the CLI (handled by [`commands.js`](mdc:scripts/modules/commands.js)) are the entry points for most operations.
@@ -108,10 +121,11 @@ alwaysApply: false
  - **UI for Presentation**: [`ui.js`](mdc:scripts/modules/ui.js) is used by command handlers and task/dependency managers to display information to the user. UI functions primarily consume data and format it for output, without modifying core application state.
  - **Utilities for Common Tasks**: [`utils.js`](mdc:scripts/modules/utils.js) provides helper functions used by all other modules for configuration, logging, file operations, and common data manipulations.
  - **AI Services Integration**: AI functionalities (complexity analysis, task expansion, PRD parsing) are invoked from [`task-manager.js`](mdc:scripts/modules/task-manager.js) and potentially [`commands.js`](mdc:scripts/modules/commands.js), likely using functions that would reside in a dedicated `ai-services.js` module or be integrated within `utils.js` or `task-manager.js`.
+  - **MCP Server Interaction**: External tools interact with the `mcp-server`, which then calls direct function wrappers in `task-master-core.js` or falls back to `executeTaskMasterCommand`. Responses are formatted by `mcp-server/src/tools/utils.js`. See [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) for details.

- **Testing Architecture**:

-  - **Test Organization Structure**:
+  - **Test Organization Structure** (See also: [`tests.mdc`](mdc:.cursor/rules/tests.mdc)):
    - **Unit Tests**: Located in `tests/unit/`, reflect the module structure with one test file per module
    - **Integration Tests**: Located in `tests/integration/`, test interactions between modules
    - **End-to-End Tests**: Located in `tests/e2e/`, test complete workflows from a user perspective
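
The `findCycles` utility is only named above; as a reference, a depth-first sketch matching that signature (the actual implementation in `utils.js` may differ):

```javascript
// Hypothetical DFS cycle check for findCycles(subtaskId, dependencyMap).
// dependencyMap: Map of id -> array of ids that the task/subtask depends on.
// Returns the looping chain if a cycle is reachable from subtaskId, else [].
function findCycles(subtaskId, dependencyMap, visited = new Set(), path = []) {
  if (path.includes(subtaskId)) {
    // Cycle found: return the portion of the path that loops back here
    return [...path.slice(path.indexOf(subtaskId)), subtaskId];
  }
  if (visited.has(subtaskId)) return []; // already fully explored, no cycle via this node
  visited.add(subtaskId);

  for (const dep of dependencyMap.get(subtaskId) || []) {
    const cycle = findCycles(dep, dependencyMap, visited, [...path, subtaskId]);
    if (cycle.length > 0) return cycle;
  }
  return [];
}
```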

.cursor/rules/commands.mdc
@@ -52,6 +52,28 @@ alwaysApply: false

> **Note**: Although options are defined with kebab-case (`--num-tasks`), Commander.js stores them internally as camelCase properties. Access them in code as `options.numTasks`, not `options['num-tasks']`.

- **Boolean Flag Conventions**:
  - ✅ DO: Use positive flags with `--skip-` prefix for disabling behavior
  - ❌ DON'T: Use negated boolean flags with `--no-` prefix
  - ✅ DO: Use consistent flag handling across all commands

```javascript
// ✅ DO: Use positive flag with skip- prefix
.option('--skip-generate', 'Skip generating task files')

// ❌ DON'T: Use --no- prefix
.option('--no-generate', 'Skip generating task files')
```

> **Important**: When handling boolean flags in the code, make your intent clear:
```javascript
// ✅ DO: Use clear variable naming that matches the flag's intent
const generateFiles = !options.skipGenerate;

// ❌ DON'T: Use confusing double negatives
const dontSkipGenerate = !options.skipGenerate;
```
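
To illustrate the camelCase note above, a minimal sketch with a hypothetical `--num-tasks` option:

```javascript
// Kebab-case on the command line, camelCase in code
program
  .command('parse-prd')
  .option('--num-tasks <n>', 'Number of tasks to generate')
  .action((options) => {
    console.log(options.numTasks); // ✅ camelCase access
    // options['num-tasks'] would be undefined
  });
```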

## Input Validation

- **Required Parameters**:
@@ -80,6 +102,38 @@ alwaysApply: false
}
```

- **Enhanced Input Validation**:
  - ✅ DO: Validate file existence for critical file operations
  - ✅ DO: Provide context-specific validation for identifiers
  - ✅ DO: Check required API keys for features that depend on them

```javascript
// ✅ DO: Validate file existence
if (!fs.existsSync(tasksPath)) {
  console.error(chalk.red(`Error: Tasks file not found at path: ${tasksPath}`));
  if (tasksPath === 'tasks/tasks.json') {
    console.log(chalk.yellow('Hint: Run task-master init or task-master parse-prd to create tasks.json first'));
  } else {
    console.log(chalk.yellow(`Hint: Check if the file path is correct: ${tasksPath}`));
  }
  process.exit(1);
}

// ✅ DO: Validate task ID
const taskId = parseInt(options.id, 10);
if (isNaN(taskId) || taskId <= 0) {
  console.error(chalk.red(`Error: Invalid task ID: ${options.id}. Task ID must be a positive integer.`));
  console.log(chalk.yellow('Usage example: task-master update-task --id=23 --prompt="Update with new information"'));
  process.exit(1);
}

// ✅ DO: Check for required API keys
if (useResearch && !process.env.PERPLEXITY_API_KEY) {
  console.log(chalk.yellow('Warning: PERPLEXITY_API_KEY environment variable is missing. Research-backed updates will not be available.'));
  console.log(chalk.yellow('Falling back to Claude AI for task update.'));
}
```

## User Feedback

- **Operation Status**:
@@ -101,6 +155,26 @@ alwaysApply: false
}
```

- **Success Messages with Next Steps**:
  - ✅ DO: Use boxen for important success messages with clear formatting
  - ✅ DO: Provide suggested next steps after command completion
  - ✅ DO: Include ready-to-use commands for follow-up actions

```javascript
// ✅ DO: Display success with next steps
console.log(boxen(
  chalk.white.bold(`Subtask ${parentId}.${subtask.id} Added Successfully`) + '\n\n' +
  chalk.white(`Title: ${subtask.title}`) + '\n' +
  chalk.white(`Status: ${getStatusWithColor(subtask.status)}`) + '\n' +
  (dependencies.length > 0 ? chalk.white(`Dependencies: ${dependencies.join(', ')}`) + '\n' : '') +
  '\n' +
  chalk.white.bold('Next Steps:') + '\n' +
  chalk.cyan(`1. Run ${chalk.yellow(`task-master show ${parentId}`)} to see the parent task with all subtasks`) + '\n' +
  chalk.cyan(`2. Run ${chalk.yellow(`task-master set-status --id=${parentId}.${subtask.id} --status=in-progress`)} to start working on it`),
  { padding: 1, borderColor: 'green', borderStyle: 'round', margin: { top: 1 } }
));
```

## Command Registration

- **Command Grouping**:
@@ -117,7 +191,10 @@ alwaysApply: false
export {
  registerCommands,
  setupCLI,
-  runCLI
+  runCLI,
+  checkForUpdate, // Include version checking functions
+  compareVersions,
+  displayUpgradeNotification
};
```

@@ -143,6 +220,88 @@ alwaysApply: false
}
```

- **Unknown Options Handling**:
  - ✅ DO: Provide clear error messages for unknown options
  - ✅ DO: Show available options when an unknown option is used
  - ✅ DO: Include command-specific help displays for common errors
  - ❌ DON'T: Allow unknown options with `.allowUnknownOption()`

```javascript
// ✅ DO: Register global error handlers for unknown options
programInstance.on('option:unknown', function(unknownOption) {
  const commandName = this._name || 'unknown';
  console.error(chalk.red(`Error: Unknown option '${unknownOption}'`));
  console.error(chalk.yellow(`Run 'task-master ${commandName} --help' to see available options`));
  process.exit(1);
});

// ✅ DO: Add command-specific help displays
function showCommandHelp() {
  console.log(boxen(
    chalk.white.bold('Command Help') + '\n\n' +
    chalk.cyan('Usage:') + '\n' +
    `  task-master command --option1=<value> [options]\n\n` +
    chalk.cyan('Options:') + '\n' +
    '  --option1 <value>    Description of option1 (required)\n' +
    '  --option2 <value>    Description of option2\n\n' +
    chalk.cyan('Examples:') + '\n' +
    '  task-master command --option1=value --option2=value',
    { padding: 1, borderColor: 'blue', borderStyle: 'round' }
  ));
}
```

- **Global Error Handling**:
  - ✅ DO: Set up global error handlers for uncaught exceptions
  - ✅ DO: Detect and format Commander-specific errors
  - ✅ DO: Provide suitable guidance for fixing common errors

```javascript
// ✅ DO: Set up global error handlers with helpful messages
process.on('uncaughtException', (err) => {
  // Handle Commander-specific errors
  if (err.code === 'commander.unknownOption') {
    const option = err.message.match(/'([^']+)'/)?.[1];
    console.error(chalk.red(`Error: Unknown option '${option}'`));
    console.error(chalk.yellow(`Run 'task-master <command> --help' to see available options`));
    process.exit(1);
  }

  // Handle other error types...
  console.error(chalk.red(`Error: ${err.message}`));
  process.exit(1);
});
```

- **Contextual Error Handling**:
  - ✅ DO: Provide specific error handling for common issues
  - ✅ DO: Include troubleshooting hints for each error type
  - ✅ DO: Use consistent error formatting across all commands

```javascript
// ✅ DO: Provide specific error handling with guidance
try {
  // Implementation
} catch (error) {
  console.error(chalk.red(`Error: ${error.message}`));

  // Provide more helpful error messages for common issues
  if (error.message.includes('task') && error.message.includes('not found')) {
    console.log(chalk.yellow('\nTo fix this issue:'));
    console.log('  1. Run task-master list to see all available task IDs');
    console.log('  2. Use a valid task ID with the --id parameter');
  } else if (error.message.includes('API key')) {
    console.log(chalk.yellow('\nThis error is related to API keys. Check your environment variables.'));
  }

  if (CONFIG.debug) {
    console.error(error);
  }

  process.exit(1);
}
```

## Integration with Other Modules

- **Import Organization**:
@@ -155,6 +314,7 @@ alwaysApply: false
import { program } from 'commander';
import path from 'path';
import chalk from 'chalk';
+import https from 'https';

import { CONFIG, log, readJSON } from './utils.js';
import { displayBanner, displayHelp } from './ui.js';
@@ -172,30 +332,22 @@ alwaysApply: false
  .description('Add a new subtask to a parent task or convert an existing task to a subtask')
  .option('-f, --file <path>', 'Path to the tasks file', 'tasks/tasks.json')
  .option('-p, --parent <id>', 'ID of the parent task (required)')
-  .option('-e, --existing <id>', 'ID of an existing task to convert to a subtask')
+  .option('-i, --task-id <id>', 'Existing task ID to convert to subtask')
  .option('-t, --title <title>', 'Title for the new subtask (when not converting)')
  .option('-d, --description <description>', 'Description for the new subtask (when not converting)')
  .option('--details <details>', 'Implementation details for the new subtask (when not converting)')
  .option('--dependencies <ids>', 'Comma-separated list of subtask IDs this subtask depends on')
  .option('--status <status>', 'Initial status for the subtask', 'pending')
  .option('--skip-generate', 'Skip regenerating task files')
  .action(async (options) => {
    // Validate required parameters
    if (!options.parent) {
      console.error(chalk.red('Error: --parent parameter is required'));
      showAddSubtaskHelp(); // Show contextual help
      process.exit(1);
    }

    // Validate that either existing task ID or title is provided
    if (!options.existing && !options.title) {
      console.error(chalk.red('Error: Either --existing or --title must be provided'));
      process.exit(1);
    }

    try {
      // Implementation
    } catch (error) {
      // Error handling
    }
    // Implementation with detailed error handling
  });
```

@@ -208,25 +360,75 @@ alwaysApply: false
  .option('-f, --file <path>', 'Path to the tasks file', 'tasks/tasks.json')
  .option('-i, --id <id>', 'ID of the subtask to remove in format "parentId.subtaskId" (required)')
  .option('-c, --convert', 'Convert the subtask to a standalone task')
  .option('--skip-generate', 'Skip regenerating task files')
  .action(async (options) => {
    // Validate required parameters
    if (!options.id) {
      console.error(chalk.red('Error: --id parameter is required'));
      process.exit(1);
    }

    // Validate subtask ID format
    if (!options.id.includes('.')) {
      console.error(chalk.red('Error: Subtask ID must be in format "parentId.subtaskId"'));
      process.exit(1);
    }

    try {
      // Implementation
    } catch (error) {
      // Error handling
    }
    // Implementation with detailed error handling
  })
  .on('error', function(err) {
    console.error(chalk.red(`Error: ${err.message}`));
    showRemoveSubtaskHelp(); // Show contextual help
    process.exit(1);
  });
```

## Version Checking and Updates

- **Automatic Version Checking**:
  - ✅ DO: Implement version checking to notify users of available updates
  - ✅ DO: Use non-blocking version checks that don't delay command execution
  - ✅ DO: Display update notifications after command completion

```javascript
// ✅ DO: Implement version checking function
async function checkForUpdate() {
  // Implementation details...
  return { currentVersion, latestVersion, needsUpdate };
}

// ✅ DO: Implement semantic version comparison
function compareVersions(v1, v2) {
  const v1Parts = v1.split('.').map(p => parseInt(p, 10));
  const v2Parts = v2.split('.').map(p => parseInt(p, 10));

  // Implementation details...
  return result; // -1, 0, or 1
}

// ✅ DO: Display attractive update notifications
function displayUpgradeNotification(currentVersion, latestVersion) {
  const message = boxen(
    `${chalk.blue.bold('Update Available!')} ${chalk.dim(currentVersion)} → ${chalk.green(latestVersion)}\n\n` +
    `Run ${chalk.cyan('npm i task-master-ai@latest -g')} to update to the latest version with new features and bug fixes.`,
    {
      padding: 1,
      margin: { top: 1, bottom: 1 },
      borderColor: 'yellow',
      borderStyle: 'round'
    }
  );

  console.log(message);
}

// ✅ DO: Integrate version checking in CLI run function
async function runCLI(argv = process.argv) {
  try {
    // Start the update check in the background - don't await yet
    const updateCheckPromise = checkForUpdate();

    // Setup and parse
    const programInstance = setupCLI();
    await programInstance.parseAsync(argv);

    // After command execution, check if an update is available
    const updateInfo = await updateCheckPromise;
    if (updateInfo.needsUpdate) {
      displayUpgradeNotification(updateInfo.currentVersion, updateInfo.latestVersion);
    }
  } catch (error) {
    // Error handling...
  }
}
```
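
As a speculative completion of the `checkForUpdate` and `compareVersions` stubs above (assuming the public npm registry endpoint and a `CONFIG.version` field for the installed version; the real implementation may differ):

```javascript
async function checkForUpdate() {
  const currentVersion = CONFIG.version; // assumption: current version exposed via utils.js CONFIG

  const latestVersion = await new Promise((resolve) => {
    https
      .get('https://registry.npmjs.org/task-master-ai/latest', (res) => {
        let body = '';
        res.on('data', (chunk) => (body += chunk));
        res.on('end', () => {
          try {
            resolve(JSON.parse(body).version);
          } catch {
            resolve(currentVersion); // treat parse failures as "no update"
          }
        });
      })
      .on('error', () => resolve(currentVersion)); // never block the CLI on network errors
  });

  return {
    currentVersion,
    latestVersion,
    needsUpdate: compareVersions(currentVersion, latestVersion) < 0
  };
}

function compareVersions(v1, v2) {
  const a = v1.split('.').map((p) => parseInt(p, 10));
  const b = v2.split('.').map((p) => parseInt(p, 10));
  for (let i = 0; i < Math.max(a.length, b.length); i++) {
    const diff = (a[i] || 0) - (b[i] || 0);
    if (diff !== 0) return diff < 0 ? -1 : 1;
  }
  return 0;
}
```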

Refer to [`commands.js`](mdc:scripts/modules/commands.js) for implementation examples and [`new_features.mdc`](mdc:.cursor/rules/new_features.mdc) for integration guidelines.

.cursor/rules/dev_workflow.mdc
@@ -5,16 +5,16 @@ alwaysApply: true
---

- **Global CLI Commands**
-  - Task Master now provides a global CLI through the `task-master` command
+  - Task Master now provides a global CLI through the `task-master` command (See [`commands.mdc`](mdc:.cursor/rules/commands.mdc) for details)
  - All functionality from `scripts/dev.js` is available through this interface
  - Install globally with `npm install -g claude-task-master` or use locally via `npx`
  - Use `task-master <command>` instead of `node scripts/dev.js <command>`
  - Examples:
-    - `task-master list` instead of `node scripts/dev.js list`
-    - `task-master next` instead of `node scripts/dev.js next`
-    - `task-master expand --id=3` instead of `node scripts/dev.js expand --id=3`
+    - `task-master list`
+    - `task-master next`
+    - `task-master expand --id=3`
  - All commands accept the same options as their script equivalents
  - The CLI provides additional commands like `task-master init` for project setup
  - The CLI (`task-master`) is the **primary** way for users to interact with the application.

- **Development Workflow Process**
  - Start new projects by running `task-master init` or `node scripts/dev.js parse-prd --input=<prd-file.txt>` to generate initial tasks.json
@@ -32,6 +32,7 @@ alwaysApply: true
  - Generate task files with `task-master generate` after updating tasks.json
  - Maintain valid dependency structure with `task-master fix-dependencies` when needed
  - Respect dependency chains and task priorities when selecting work
+  - **MCP Server**: For integrations (like Cursor), interact via the MCP server which prefers direct function calls. Restart the MCP server if core logic in `scripts/modules` changes. See [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc).
  - Report progress regularly using the list command

- **Task Complexity Analysis**
@@ -49,7 +50,7 @@ alwaysApply: true
  - Use `--prompt="<context>"` to provide additional context when needed
  - Review and adjust generated subtasks as necessary
  - Use `--all` flag to expand multiple pending tasks at once
-  - If subtasks need regeneration, clear them first with `clear-subtasks` command
+  - If subtasks need regeneration, clear them first with `clear-subtasks` command (See Command Reference below)

- **Implementation Drift Handling**
  - When implementation differs significantly from planned approach
@@ -79,16 +80,14 @@ alwaysApply: true
```

- **Command Reference: parse-prd**
-  - Legacy Syntax: `node scripts/dev.js parse-prd --input=<prd-file.txt>`
  - CLI Syntax: `task-master parse-prd --input=<prd-file.txt>`
-  - Description: Parses a PRD document and generates a tasks.json file with structured tasks
+  - Description: Parses a PRD document and generates a `tasks.json` file with structured tasks
  - Parameters:
    - `--input=<file>`: Path to the PRD text file (default: sample-prd.txt)
  - Example: `task-master parse-prd --input=requirements.txt`
  - Notes: Will overwrite existing tasks.json file. Use with caution.

- **Command Reference: update**
  - Legacy Syntax: `node scripts/dev.js update --from=<id> --prompt="<prompt>"`
  - CLI Syntax: `task-master update --from=<id> --prompt="<prompt>"`
  - Description: Updates tasks with ID >= specified ID based on the provided prompt
  - Parameters:
@@ -97,8 +96,31 @@ alwaysApply: true
  - Example: `task-master update --from=4 --prompt="Now we are using Express instead of Fastify."`
  - Notes: Only updates tasks not marked as 'done'. Completed tasks remain unchanged.

+- **Command Reference: update-task**
+  - CLI Syntax: `task-master update-task --id=<id> --prompt="<prompt>"`
+  - Description: Updates a single task by ID with new information
+  - Parameters:
+    - `--id=<id>`: ID of the task to update (required)
+    - `--prompt="<text>"`: New information or context to update the task (required)
+    - `--research`: Use Perplexity AI for research-backed updates
+  - Example: `task-master update-task --id=5 --prompt="Use JWT for authentication instead of sessions."`
+  - Notes: Only updates tasks not marked as 'done'. Preserves completed subtasks.

+- **Command Reference: update-subtask**
+  - CLI Syntax: `task-master update-subtask --id=<id> --prompt="<prompt>"`
+  - Description: Appends additional information to a specific subtask without replacing existing content
+  - Parameters:
+    - `--id=<id>`: ID of the subtask to update in format "parentId.subtaskId" (required)
+    - `--prompt="<text>"`: Information to add to the subtask (required)
+    - `--research`: Use Perplexity AI for research-backed updates
+  - Example: `task-master update-subtask --id=5.2 --prompt="Add details about API rate limiting."`
+  - Notes:
+    - Appends new information to subtask details with timestamp
+    - Does not replace existing content, only adds to it
+    - Uses XML-like tags to clearly mark added information
+    - Will not update subtasks marked as 'done' or 'completed'

- **Command Reference: generate**
-  - Legacy Syntax: `node scripts/dev.js generate`
  - CLI Syntax: `task-master generate`
  - Description: Generates individual task files in tasks/ directory based on tasks.json
  - Parameters:
@@ -108,7 +130,6 @@ alwaysApply: true
  - Notes: Overwrites existing task files. Creates tasks/ directory if needed.

- **Command Reference: set-status**
-  - Legacy Syntax: `node scripts/dev.js set-status --id=<id> --status=<status>`
  - CLI Syntax: `task-master set-status --id=<id> --status=<status>`
  - Description: Updates the status of a specific task in tasks.json
  - Parameters:
@@ -118,7 +139,6 @@ alwaysApply: true
  - Notes: Common values are 'done', 'pending', and 'deferred', but any string is accepted.

- **Command Reference: list**
-  - Legacy Syntax: `node scripts/dev.js list`
  - CLI Syntax: `task-master list`
  - Description: Lists all tasks in tasks.json with IDs, titles, and status
  - Parameters:
@@ -129,7 +149,6 @@ alwaysApply: true
  - Notes: Provides quick overview of project progress. Use at start of sessions.

- **Command Reference: expand**
-  - Legacy Syntax: `node scripts/dev.js expand --id=<id> [--num=<number>] [--research] [--prompt="<context>"]`
  - CLI Syntax: `task-master expand --id=<id> [--num=<number>] [--research] [--prompt="<context>"]`
  - Description: Expands a task with subtasks for detailed implementation
  - Parameters:
@@ -143,7 +162,6 @@ alwaysApply: true
  - Notes: Uses complexity report recommendations if available.

- **Command Reference: analyze-complexity**
-  - Legacy Syntax: `node scripts/dev.js analyze-complexity [options]`
  - CLI Syntax: `task-master analyze-complexity [options]`
  - Description: Analyzes task complexity and generates expansion recommendations
  - Parameters:
@@ -156,7 +174,6 @@ alwaysApply: true
  - Notes: Report includes complexity scores, recommended subtasks, and tailored prompts.

- **Command Reference: clear-subtasks**
-  - Legacy Syntax: `node scripts/dev.js clear-subtasks --id=<id>`
  - CLI Syntax: `task-master clear-subtasks --id=<id>`
  - Description: Removes subtasks from specified tasks to allow regeneration
  - Parameters:
@@ -230,7 +247,6 @@ alwaysApply: true
  - Dependencies are visualized with status indicators in task listings and files

- **Command Reference: add-dependency**
-  - Legacy Syntax: `node scripts/dev.js add-dependency --id=<id> --depends-on=<id>`
  - CLI Syntax: `task-master add-dependency --id=<id> --depends-on=<id>`
  - Description: Adds a dependency relationship between two tasks
  - Parameters:
@@ -240,7 +256,6 @@ alwaysApply: true
  - Notes: Prevents circular dependencies and duplicates; updates task files automatically

- **Command Reference: remove-dependency**
-  - Legacy Syntax: `node scripts/dev.js remove-dependency --id=<id> --depends-on=<id>`
  - CLI Syntax: `task-master remove-dependency --id=<id> --depends-on=<id>`
  - Description: Removes a dependency relationship between two tasks
  - Parameters:
@@ -250,7 +265,6 @@ alwaysApply: true
  - Notes: Checks if dependency actually exists; updates task files automatically

- **Command Reference: validate-dependencies**
-  - Legacy Syntax: `node scripts/dev.js validate-dependencies [options]`
  - CLI Syntax: `task-master validate-dependencies [options]`
  - Description: Checks for and identifies invalid dependencies in tasks.json and task files
  - Parameters:
@@ -262,7 +276,6 @@ alwaysApply: true
  - Use before fix-dependencies to audit your task structure

- **Command Reference: fix-dependencies**
-  - Legacy Syntax: `node scripts/dev.js fix-dependencies [options]`
  - CLI Syntax: `task-master fix-dependencies [options]`
  - Description: Finds and fixes all invalid dependencies in tasks.json and task files
  - Parameters:
@@ -275,7 +288,6 @@ alwaysApply: true
  - Provides detailed report of all fixes made

- **Command Reference: complexity-report**
-  - Legacy Syntax: `node scripts/dev.js complexity-report [options]`
  - CLI Syntax: `task-master complexity-report [options]`
  - Description: Displays the task complexity analysis report in a formatted, easy-to-read way
  - Parameters:

.cursor/rules/mcp.mdc (new file, 87 lines)
@@ -0,0 +1,87 @@
---
description: Guidelines for implementing and interacting with the Task Master MCP Server
globs: mcp-server/src/**/*, scripts/modules/**/*
alwaysApply: false
---

# Task Master MCP Server Guidelines

This document outlines the architecture and implementation patterns for the Task Master Model Context Protocol (MCP) server, designed for integration with tools like Cursor.

## Architecture Overview (See also: [`architecture.mdc`](mdc:.cursor/rules/architecture.mdc))

The MCP server acts as a bridge between external tools (like Cursor) and the core Task Master CLI logic. It leverages FastMCP for the server framework.

- **Flow**: `External Tool (Cursor)` <-> `FastMCP Server` <-> `MCP Tools` (`mcp-server/src/tools/*.js`) <-> `Core Logic Wrappers` (`mcp-server/src/core/task-master-core.js`) <-> `Core Modules` (`scripts/modules/*.js`)
- **Goal**: Provide a performant and reliable way for external tools to interact with Task Master functionality without directly invoking the CLI for every operation.

## Key Principles

- **Prefer Direct Function Calls**: For optimal performance and error handling, MCP tools should utilize direct function wrappers defined in [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js). These wrappers call the underlying logic from the core modules (e.g., [`task-manager.js`](mdc:scripts/modules/task-manager.js)).
- **Use `executeMCPToolAction`**: This utility function in [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js) is the standard wrapper for executing the main logic within an MCP tool's `execute` function. It handles common boilerplate like logging, argument processing, calling the core action (`*Direct` function), and formatting the response.
- **CLI Execution as Fallback**: The `executeTaskMasterCommand` utility in [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js) allows executing commands via the CLI (`task-master ...`). This should **only** be used as a fallback if a direct function wrapper is not yet implemented or if a specific command intrinsically requires CLI execution.
- **Centralized Utilities** (See also: [`utilities.mdc`](mdc:.cursor/rules/utilities.mdc)):
  - Use `findTasksJsonPath` (in [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js)) within direct function wrappers to locate the `tasks.json` file consistently.
- **Leverage MCP Utilities**: The file [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js) contains essential helpers for MCP tool implementation:
  - `getProjectRoot`: Normalizes project paths (used internally by other utils).
  - `handleApiResult`: Standardizes handling results from direct function calls (success/error).
  - `createContentResponse`/`createErrorResponse`: Formats successful/error MCP responses.
  - `processMCPResponseData`: Filters/cleans data for MCP responses (e.g., removing `details`, `testStrategy`). This is the default processor used by `executeMCPToolAction`.
  - `executeMCPToolAction`: The primary wrapper function for tool execution logic.
  - `executeTaskMasterCommand`: Fallback for executing CLI commands.
- **Caching**: To improve performance for frequently called read operations (like `listTasks`), a caching layer using `lru-cache` is implemented.
  - Caching logic should be added *inside* the direct function wrappers in [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js) using the `getCachedOrExecute` utility from [`tools/utils.js`](mdc:mcp-server/src/tools/utils.js).
  - Generate unique cache keys based on function arguments that define a distinct call.
  - Responses will include a `fromCache` flag.
  - Cache statistics can be monitored using the `cacheStats` MCP tool (implemented via `getCacheStatsDirect`).
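
For orientation, a minimal FastMCP server setup sketch (the structure of `mcp-server/src/server.js` and the tool-registry export name are assumptions and may differ):

```javascript
import { FastMCP } from 'fastmcp';
import { registerTaskMasterTools } from './tools/index.js'; // hypothetical export name

const server = new FastMCP({
  name: 'Task Master MCP Server',
  version: '1.0.0'
});

// Each tool module calls server.addTool({ name, description, parameters, execute })
registerTaskMasterTools(server);

// stdio transport, as used when Cursor launches the server via .cursor/mcp.json
server.start({ transportType: 'stdio' });
```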

## Implementing MCP Support for a Command

Follow these steps to add MCP support for an existing Task Master command (see [`new_features.mdc`](mdc:.cursor/rules/new_features.mdc) for more detail):

1. **Ensure Core Logic Exists**: Verify the core functionality is implemented and exported from the relevant module in `scripts/modules/`.
2. **Create Direct Wrapper**: In [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js):
   - Import the core function.
   - Import `getCachedOrExecute` from `../tools/utils.js`.
   - Create an `async function yourCommandDirect(args, log)` wrapper.
   - Inside the wrapper:
     - Determine arguments needed for both the core logic and the cache key (e.g., `tasksPath`, filters). Use `findTasksJsonPath(args, log)` if needed.
     - **Generate a unique `cacheKey`** based on the arguments that define a distinct operation (e.g., `` `yourCommand:${tasksPath}:${filter}` ``).
     - **Define the `coreActionFn`**: An `async` function that contains the actual call to the imported core logic function, handling its specific errors and returning `{ success: true/false, data/error }`.
     - **Call `getCachedOrExecute`**:
       ```javascript
       const result = await getCachedOrExecute({
         cacheKey,
         actionFn: coreActionFn, // The function wrapping the core logic call
         log
       });
       return result; // Returns { success, data/error, fromCache }
       ```
   - Export the wrapper function and add it to the `directFunctions` map.
3. **Create MCP Tool**: In `mcp-server/src/tools/`:
   - Create a new file (e.g., `yourCommand.js`).
   - Import `z` for parameter schema definition.
   - Import `executeMCPToolAction` from [`./utils.js`](mdc:mcp-server/src/tools/utils.js).
   - Import the `yourCommandDirect` wrapper function from `../core/task-master-core.js`.
   - Implement `registerYourCommandTool(server)`:
     - Call `server.addTool`.
     - Define `name`, `description`, and `parameters` using `zod`. Include `projectRoot` and `file` as optional parameters if relevant.
     - Define the `async execute(args, log)` function.
     - Inside `execute`, call `executeMCPToolAction`:
       ```javascript
       return executeMCPToolAction({
         actionFn: yourCommandDirect, // The direct function wrapper
         args, // Arguments from the tool call
         log, // MCP logger instance
         actionName: 'Your Command Description', // For logging
         // processResult: customProcessor // Optional: if default filtering isn't enough
       });
       ```
4. **Register Tool**: Import and call `registerYourCommandTool` in [`mcp-server/src/tools/index.js`](mdc:mcp-server/src/tools/index.js).
5. **Update `mcp.json`**: Add the new tool definition to the `tools` array in `.cursor/mcp.json`.
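
Putting steps 2 and 3 together, a condensed sketch of a direct wrapper for a hypothetical `listTasks` command (names and the core-function signature are illustrative; the actual code in `task-master-core.js` may differ):

```javascript
// mcp-server/src/core/task-master-core.js (excerpt)
import { listTasks } from '../../scripts/modules/task-manager.js'; // assumed signature: listTasks(tasksPath, statusFilter)
import { getCachedOrExecute } from '../tools/utils.js';

export async function listTasksDirect(args, log) {
  const tasksPath = findTasksJsonPath(args, log); // path utility defined alongside the wrappers
  const statusFilter = args.status || 'all';
  const cacheKey = `listTasks:${tasksPath}:${statusFilter}`;

  const coreActionFn = async () => {
    try {
      const data = listTasks(tasksPath, statusFilter);
      return { success: true, data };
    } catch (error) {
      return { success: false, error: { message: error.message } };
    }
  };

  // Returns { success, data/error, fromCache }
  return getCachedOrExecute({ cacheKey, actionFn: coreActionFn, log });
}
```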

## Handling Responses

- MCP tools should return data formatted by `createContentResponse` (which stringifies objects) or `createErrorResponse`.
- The `processMCPResponseData` utility automatically removes potentially large fields like `details` and `testStrategy` from task objects before they are returned. This is the default behavior when using `executeMCPToolAction`. If specific fields need to be preserved or different fields removed, a custom `processResult` function can be passed to `executeMCPToolAction`.
- The `handleApiResult` utility (used by `executeMCPToolAction`) now expects the result object from the direct function wrapper to include a `fromCache` boolean flag. This flag is included in the final JSON response sent to the MCP client, nested alongside the actual data (e.g., `{ "fromCache": true, "data": { ... } }`).

.cursor/rules/new_features.mdc
@@ -8,14 +8,14 @@ alwaysApply: false

## Feature Placement Decision Process

-- **Identify Feature Type**:
-  - **Data Manipulation**: Features that create, read, update, or delete tasks belong in [`task-manager.js`](mdc:scripts/modules/task-manager.js)
-  - **Dependency Management**: Features that handle task relationships belong in [`dependency-manager.js`](mdc:scripts/modules/dependency-manager.js)
-  - **User Interface**: Features that display information to users belong in [`ui.js`](mdc:scripts/modules/ui.js)
-  - **AI Integration**: Features that use AI models belong in [`ai-services.js`](mdc:scripts/modules/ai-services.js)
+- **Identify Feature Type** (See [`architecture.mdc`](mdc:.cursor/rules/architecture.mdc) for module details):
+  - **Data Manipulation**: Features that create, read, update, or delete tasks belong in [`task-manager.js`](mdc:scripts/modules/task-manager.js). Follow guidelines in [`tasks.mdc`](mdc:.cursor/rules/tasks.mdc).
+  - **Dependency Management**: Features that handle task relationships belong in [`dependency-manager.js`](mdc:scripts/modules/dependency-manager.js). Follow guidelines in [`dependencies.mdc`](mdc:.cursor/rules/dependencies.mdc).
+  - **User Interface**: Features that display information to users belong in [`ui.js`](mdc:scripts/modules/ui.js). Follow guidelines in [`ui.mdc`](mdc:.cursor/rules/ui.mdc).
+  - **AI Integration**: Features that use AI models belong in [`ai-services.js`](mdc:scripts/modules/ai-services.js).
  - **Cross-Cutting**: Features that don't fit one category may need components in multiple modules

-- **Command-Line Interface**:
+- **Command-Line Interface** (See [`commands.mdc`](mdc:.cursor/rules/commands.mdc)):
  - All new user-facing commands should be added to [`commands.js`](mdc:scripts/modules/commands.js)
  - Use consistent patterns for option naming and help text
  - Follow the Commander.js model for subcommand structure
@@ -24,11 +24,11 @@ alwaysApply: false

The standard pattern for adding a feature follows this workflow:

-1. **Core Logic**: Implement the business logic in the appropriate module
-2. **UI Components**: Add any display functions to [`ui.js`](mdc:scripts/modules/ui.js)
-3. **Command Integration**: Add the CLI command to [`commands.js`](mdc:scripts/modules/commands.js)
+1. **Core Logic**: Implement the business logic in the appropriate module (e.g., [`task-manager.js`](mdc:scripts/modules/task-manager.js)).
+2. **UI Components**: Add any display functions to [`ui.js`](mdc:scripts/modules/ui.js) following [`ui.mdc`](mdc:.cursor/rules/ui.mdc).
+3. **Command Integration**: Add the CLI command to [`commands.js`](mdc:scripts/modules/commands.js) following [`commands.mdc`](mdc:.cursor/rules/commands.mdc).
4. **Testing**: Write tests for all components of the feature (following [`tests.mdc`](mdc:.cursor/rules/tests.mdc))
-5. **Configuration**: Update any configuration in [`utils.js`](mdc:scripts/modules/utils.js) if needed
+5. **Configuration**: Update any configuration in [`utils.js`](mdc:scripts/modules/utils.js) if needed, following [`utilities.mdc`](mdc:.cursor/rules/utilities.mdc).
6. **Documentation**: Update help text and documentation in [dev_workflow.mdc](mdc:scripts/modules/dev_workflow.mdc)

```javascript
@@ -294,7 +294,7 @@ For each new feature:

1. Add help text to the command definition
2. Update [`dev_workflow.mdc`](mdc:scripts/modules/dev_workflow.mdc) with command reference
-3. Add examples to the appropriate sections in [`MODULE_PLAN.md`](mdc:scripts/modules/MODULE_PLAN.md)
+3. Consider updating [`architecture.mdc`](mdc:.cursor/rules/architecture.mdc) if the feature significantly changes module responsibilities.

Follow the existing command reference format:
```markdown
@@ -309,3 +309,51 @@ Follow the existing command reference format:
```

For more information on module structure, see [`MODULE_PLAN.md`](mdc:scripts/modules/MODULE_PLAN.md) and follow [`self_improve.mdc`](mdc:scripts/modules/self_improve.mdc) for best practices on updating documentation.

## Adding MCP Server Support for Commands

Integrating Task Master commands with the MCP server (for use by tools like Cursor) follows a specific pattern distinct from the CLI command implementation.

- **Goal**: Leverage direct function calls for performance and reliability, avoiding CLI overhead.
- **Reference**: See [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) for full details.

**MCP Integration Workflow**:

1. **Core Logic**: Ensure the command's core logic exists in the appropriate module (e.g., [`task-manager.js`](mdc:scripts/modules/task-manager.js)).
2. **Direct Function Wrapper**:
   - In [`task-master-core.js`](mdc:mcp-server/src/core/task-master-core.js), create an `async function yourCommandDirect(args, log)`.
   - This function imports and calls the core logic.
   - It uses utilities like `findTasksJsonPath` if needed.
   - It handles argument parsing and validation specific to the direct call.
   - **Implement Caching (if applicable)**: For read operations that benefit from caching, use the `getCachedOrExecute` utility here to wrap the core logic call. Generate a unique cache key based on relevant arguments.
   - It returns a standard `{ success: true/false, data/error, fromCache: boolean }` object.
   - Export the function and add it to the `directFunctions` map.
3. **MCP Tool File**:
   - Create a new file in `mcp-server/src/tools/` (e.g., `yourCommand.js`).
   - Import `zod`, `executeMCPToolAction` from `./utils.js`, and your `yourCommandDirect` function.
   - Implement `registerYourCommandTool(server)` which calls `server.addTool`:
     - Define the tool `name`, `description`, and `parameters` using `zod`. Include optional `projectRoot` and `file` if relevant, following patterns in existing tools.
     - Define the `async execute(args, log)` method for the tool.
     - **Crucially**, the `execute` method should primarily call `executeMCPToolAction`:
       ```javascript
       // In mcp-server/src/tools/yourCommand.js
       import { executeMCPToolAction } from "./utils.js";
       import { yourCommandDirect } from "../core/task-master-core.js";
       import { z } from "zod";

       export function registerYourCommandTool(server) {
         server.addTool({
           name: "yourCommand",
           description: "Description of your command.",
           parameters: z.object({ /* zod schema */ }),
           async execute(args, log) {
             return executeMCPToolAction({
               actionFn: yourCommandDirect, // Pass the direct function wrapper
               args, log, actionName: "Your Command Description"
             });
           }
         });
       }
       ```
4. **Register in Tool Index**: Import and call `registerYourCommandTool` in [`mcp-server/src/tools/index.js`](mdc:mcp-server/src/tools/index.js).
5. **Update `mcp.json`**: Add the tool definition to `.cursor/mcp.json`.

.cursor/rules/tests.mdc
@@ -7,7 +7,7 @@ globs: "**/*.test.js,tests/**/*"

## Test Organization Structure

-- **Unit Tests**
+- **Unit Tests** (See [`architecture.mdc`](mdc:.cursor/rules/architecture.mdc) for module breakdown)
  - Located in `tests/unit/`
  - Test individual functions and utilities in isolation
  - Mock all external dependencies
@@ -324,7 +324,7 @@ When testing ES modules (`"type": "module"` in package.json), traditional mocking
## Testing Common Components

- **CLI Commands**
-  - Mock the action handlers and verify they're called with correct arguments
+  - Mock the action handlers (defined in [`commands.js`](mdc:scripts/modules/commands.js)) and verify they're called with correct arguments
  - Test command registration and option parsing
  - Use `commander` test utilities or custom mocks
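
A minimal Jest sketch of this mocking pattern (the command and option names are illustrative):

```javascript
import { jest } from '@jest/globals';
import { Command } from 'commander';

test('list command invokes its action handler with parsed options', async () => {
  const listAction = jest.fn();
  const program = new Command();

  program
    .command('list')
    .option('-f, --file <path>', 'Path to the tasks file', 'tasks/tasks.json')
    .action(listAction);

  await program.parseAsync(['node', 'task-master', 'list', '--file', 'custom.json']);

  // Commander passes the parsed options object to the action handler
  expect(listAction).toHaveBeenCalledWith(
    expect.objectContaining({ file: 'custom.json' }),
    expect.anything()
  );
});
```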

.cursor/rules/utilities.mdc
@@ -1,6 +1,6 @@
---
description: Guidelines for implementing utility functions
-globs: scripts/modules/utils.js
+globs: scripts/modules/utils.js, mcp-server/src/**/*
alwaysApply: false
---

@@ -273,13 +273,81 @@ alwaysApply: false
}
```

## MCP Server Utilities (`mcp-server/src/tools/utils.js`)

- **Purpose**: These utilities specifically support the MCP server tools, handling communication patterns and data formatting for MCP clients. Refer to [`mcp.mdc`](mdc:.cursor/rules/mcp.mdc) for usage patterns.

(See also: [`tests.mdc`](mdc:.cursor/rules/tests.mdc) for testing these utilities)

- **`getProjectRoot(projectRootRaw, log)`**:
  - Normalizes a potentially relative project root path into an absolute path.
  - Defaults to `process.cwd()` if `projectRootRaw` is not provided.
  - Primarily used *internally* by `executeMCPToolAction` and `executeTaskMasterCommand`. Tools usually don't need to call this directly.

- **`executeMCPToolAction({ actionFn, args, log, actionName, processResult })`**:
  - ✅ **DO**: Use this as the main wrapper inside an MCP tool's `execute` method when calling a direct function wrapper.
  - Handles the standard workflow: logs action start, normalizes `projectRoot`, calls the `actionFn` (e.g., `listTasksDirect`), processes the result (using `handleApiResult`), logs success/error, and returns a formatted MCP response (`createContentResponse`/`createErrorResponse`).
  - Simplifies tool implementation significantly by handling boilerplate.
  - Accepts an optional `processResult` function to customize data filtering/transformation before sending the response (defaults to `processMCPResponseData`).

- **`handleApiResult(result, log, errorPrefix, processFunction)`**:
  - Takes the standard `{ success, data/error }` object returned by direct function wrappers (like `listTasksDirect`).
  - Checks the `success` flag.
  - If successful, processes the `data` using `processFunction` (defaults to `processMCPResponseData`).
  - Returns a formatted MCP response object using `createContentResponse` or `createErrorResponse`.
  - Typically called *internally* by `executeMCPToolAction`.

- **`executeTaskMasterCommand(command, log, args, projectRootRaw)`**:
  - Executes a Task Master command using `child_process.spawnSync`.
  - Tries the global `task-master` command first, then falls back to `node scripts/dev.js`.
  - Handles project root normalization internally.
  - Returns `{ success, stdout, stderr }` or `{ success: false, error }`.
  - ❌ **DON'T**: Use this as the primary method for MCP tools. Prefer `executeMCPToolAction` with direct function calls. Use only as a fallback for commands not yet refactored or those requiring CLI execution.

- **`processMCPResponseData(taskOrData, fieldsToRemove = ['details', 'testStrategy'])`**:
  - Filters task data before sending it to the MCP client.
  - By default, removes the `details` and `testStrategy` fields from task objects and their subtasks to reduce payload size.
  - Can handle single task objects or data structures containing a `tasks` array (like from `listTasks`).
  - This is the default processor used by `executeMCPToolAction`.

```javascript
// Example usage (typically done inside executeMCPToolAction):
const rawResult = { success: true, data: { tasks: [ { id: 1, title: '...', details: '...', subtasks: [...] } ] } };
const filteredData = processMCPResponseData(rawResult.data);
// filteredData.tasks[0] will NOT have the 'details' field.
```

- **`createContentResponse(content)`**:
  - ✅ **DO**: Use this (usually via `handleApiResult` or `executeMCPToolAction`) to format successful MCP responses.
  - Wraps the `content` (stringifies objects to JSON) in the standard FastMCP `{ content: [{ type: "text", text: ... }] }` structure.

- **`createErrorResponse(errorMessage)`**:
  - ✅ **DO**: Use this (usually via `handleApiResult` or `executeMCPToolAction`) to format error responses for MCP.
  - Wraps the `errorMessage` in the standard FastMCP error structure, including `isError: true`.

- **`getCachedOrExecute({ cacheKey, actionFn, log })`**:
  - ✅ **DO**: Use this utility *inside direct function wrappers* (like `listTasksDirect` in `task-master-core.js`) to implement caching for MCP operations.
  - Checks the `ContextManager` cache using `cacheKey`.
  - If a hit occurs, returns the cached result directly.
  - If a miss occurs, it executes the provided `actionFn` (which should be an async function returning `{ success, data/error }`).
  - If `actionFn` succeeds, its result is stored in the cache under `cacheKey`.
  - Returns the result (either cached or fresh) wrapped in the standard structure `{ success, data/error, fromCache: boolean }`.

- **`executeMCPToolAction({ actionFn, args, log, actionName, processResult })`**:
  - Update: While this function *can* technically coordinate caching if provided a `cacheKeyGenerator`, the current preferred pattern involves implementing caching *within* the `actionFn` (the direct wrapper) using `getCachedOrExecute`. `executeMCPToolAction` primarily orchestrates the call to `actionFn` and handles processing its result (including the `fromCache` flag) via `handleApiResult`.

- **`handleApiResult(result, log, errorPrefix, processFunction)`**:
  - Update: Now expects the `result` object to potentially contain a `fromCache` boolean flag. If present, this flag is included in the final response payload generated by `createContentResponse` (e.g., `{ fromCache: true, data: ... }`).
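
A rough sketch of the `getCachedOrExecute` behavior described above (the actual implementation and the `ContextManager` accessor names are assumptions):

```javascript
async function getCachedOrExecute({ cacheKey, actionFn, log }) {
  // contextManager stands in for the shared ContextManager/lru-cache instance
  const cached = contextManager.get(cacheKey); // hypothetical accessor
  if (cached !== undefined) {
    log.info(`Cache hit for ${cacheKey}`);
    return { ...cached, fromCache: true };
  }

  log.info(`Cache miss for ${cacheKey}; executing action`);
  const result = await actionFn(); // expected shape: { success, data/error }
  if (result.success) {
    contextManager.set(cacheKey, result); // only successful results are cached
  }
  return { ...result, fromCache: false };
}
```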

## Export Organization

- **Grouping Related Functions**:
-  - ✅ DO: Export all utility functions in a single statement
-  - ✅ DO: Group related exports together
-  - ✅ DO: Export configuration constants
-  - ❌ DON'T: Use default exports
+  - ✅ DO: Keep utilities relevant to their location (e.g., core utils in `scripts/modules/utils.js`, MCP utils in `mcp-server/src/tools/utils.js`).
+  - ✅ DO: Export all utility functions in a single statement per file.
+  - ✅ DO: Group related exports together.
+  - ✅ DO: Export configuration constants.
+  - ❌ DON'T: Use default exports.
+  - ❌ DON'T: Create circular dependencies between utility files or between utilities and the modules that use them (See [`architecture.mdc`](mdc:.cursor/rules/architecture.mdc)).

```javascript
// ✅ DO: Organize exports logically
@@ -311,4 +379,4 @@ alwaysApply: false
};
```

-Refer to [`utils.js`](mdc:scripts/modules/utils.js) for implementation examples and [`new_features.mdc`](mdc:.cursor/rules/new_features.mdc) for integration guidelines.
+Refer to [`utils.js`](mdc:scripts/modules/utils.js) for implementation examples and [`new_features.mdc`](mdc:.cursor/rules/new_features.mdc) for integration guidelines. Use [`commands.mdc`](mdc:.cursor/rules/commands.mdc) for CLI integration details.

.github/workflows/ci.yml (new file, 95 lines)
@@ -0,0 +1,95 @@
name: CI

on:
  push:
    branches:
      - main
      - next
  pull_request:
    branches:
      - main
      - next

permissions:
  contents: read

jobs:
  setup:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm'

      - name: Install Dependencies
        id: install
        run: npm ci
        timeout-minutes: 2

      - name: Cache node_modules
        uses: actions/cache@v4
        with:
          path: node_modules
          key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}

  format-check:
    needs: setup
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-node@v4
        with:
          node-version: 20

      - name: Restore node_modules
        uses: actions/cache@v4
        with:
          path: node_modules
          key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}

      - name: Format Check
        run: npm run format-check
        env:
          FORCE_COLOR: 1

  test:
    needs: setup
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-node@v4
        with:
          node-version: 20

      - name: Restore node_modules
        uses: actions/cache@v4
        with:
          path: node_modules
          key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}

      - name: Run Tests
        run: |
          npm run test:coverage -- --coverageThreshold '{"global":{"branches":0,"functions":0,"lines":0,"statements":0}}' --detectOpenHandles --forceExit
        env:
          NODE_ENV: test
          CI: true
          FORCE_COLOR: 1
        timeout-minutes: 10

      - name: Upload Test Results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-results
          path: |
            test-results
            coverage
            junit.xml
          retention-days: 30

.github/workflows/release.yml
@@ -3,7 +3,6 @@ on:
  push:
    branches:
      - main
-      - next
jobs:
  release:
    runs-on: ubuntu-latest
@@ -14,13 +13,25 @@ jobs:

      - uses: actions/setup-node@v4
        with:
-          node-version: 18
+          node-version: 20
          cache: 'npm'

+      - name: Cache node_modules
+        uses: actions/cache@v4
+        with:
+          path: |
+            node_modules
+            */*/node_modules
+          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
+          restore-keys: |
+            ${{ runner.os }}-node-

      - name: Install Dependencies
-        run: npm install
+        run: npm ci
+        timeout-minutes: 2

      - name: Create Release Pull Request or Publish to npm
-        uses: changesets/action@1.4.10
+        uses: changesets/action@v1
        with:
          publish: npm run release
        env:
6 .prettierignore Normal file
@@ -0,0 +1,6 @@
# Ignore artifacts:
build
coverage
.changeset
tasks
package-lock.json
11 .prettierrc Normal file
@@ -0,0 +1,11 @@
{
	"printWidth": 80,
	"tabWidth": 2,
	"useTabs": true,
	"semi": true,
	"singleQuote": true,
	"trailingComma": "none",
	"bracketSpacing": true,
	"arrowParens": "always",
	"endOfLine": "lf"
}
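The CI's `npm run format-check` step presumably points Prettier at this config; a typical `package.json` wiring might look like the following sketch (the actual scripts block is not shown in this diff, so treat it as an assumption):

```json
{
	"scripts": {
		"format-check": "prettier --check .",
		"format": "prettier --write ."
	}
}
```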
27 CHANGELOG.md Normal file
@@ -0,0 +1,27 @@
# task-master-ai

## 0.10.1

### Patch Changes

- [#80](https://github.com/eyaltoledano/claude-task-master/pull/80) [`aa185b2`](https://github.com/eyaltoledano/claude-task-master/commit/aa185b28b248b4ca93f9195b502e2f5187868eaa) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Remove non-existent package `@model-context-protocol/sdk`

- [#45](https://github.com/eyaltoledano/claude-task-master/pull/45) [`757fd47`](https://github.com/eyaltoledano/claude-task-master/commit/757fd478d2e2eff8506ae746c3470c6088f4d944) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Add license to repo

## 0.10.0

### Minor Changes

- [#44](https://github.com/eyaltoledano/claude-task-master/pull/44) [`eafdb47`](https://github.com/eyaltoledano/claude-task-master/commit/eafdb47418b444c03c092f653b438cc762d4bca8) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - add github actions to automate github and npm releases

- [#20](https://github.com/eyaltoledano/claude-task-master/pull/20) [`4eed269`](https://github.com/eyaltoledano/claude-task-master/commit/4eed2693789a444f704051d5fbb3ef8d460e4e69) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Implement MCP server for all commands using tools.

### Patch Changes

- [#44](https://github.com/eyaltoledano/claude-task-master/pull/44) [`44db895`](https://github.com/eyaltoledano/claude-task-master/commit/44db895303a9209416236e3d519c8a609ad85f61) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Added changeset config #39

- [#50](https://github.com/eyaltoledano/claude-task-master/pull/50) [`257160a`](https://github.com/eyaltoledano/claude-task-master/commit/257160a9670b5d1942e7c623bd2c1a3fde7c06a0) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix addTask tool `projectRoot not defined`

- [#57](https://github.com/eyaltoledano/claude-task-master/pull/57) [`9fd42ee`](https://github.com/eyaltoledano/claude-task-master/commit/9fd42eeafdc25a96cdfb70aa3af01f525d26b4bc) Thanks [@github-actions](https://github.com/apps/github-actions)! - fix mcp server not connecting to cursor

- [#48](https://github.com/eyaltoledano/claude-task-master/pull/48) [`5ec3651`](https://github.com/eyaltoledano/claude-task-master/commit/5ec3651e6459add7354910a86b3c4db4d12bc5d1) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix workflows
25 LICENSE Normal file
@@ -0,0 +1,25 @@
Task Master License

MIT License

Copyright (c) 2025 — Eyal Toledano, Ralph Khreish

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

"Commons Clause" License Condition v1.0

The Software is provided to you by the Licensor under the License (defined below), subject to the following condition:

Without limiting other conditions in the License, the grant of rights under the License will not include, and the License does not grant to you, the right to Sell the Software.

For purposes of the foregoing, "Sell" means practicing any or all of the rights granted to you under the License to provide the Software to third parties, for a fee or other consideration (including without limitation fees for hosting or consulting/support services related to the Software), as part of a product or service whose value derives, entirely or substantially, from the functionality of the Software. Any license notice or attribution required by the License must also include this Commons Clause License Condition notice.

Software: All Task Master associated files (including all files in the GitHub repository "claude-task-master" and in the npm package "task-master-ai").

License: MIT

Licensor: Eyal Toledano, Ralph Khreish
90 LICENSE.md Normal file
@@ -0,0 +1,90 @@
# Dual License

This project is licensed under two separate licenses:

1. [Business Source License 1.1](#business-source-license-11) (BSL 1.1) for commercial use of Task Master itself
2. [Apache License 2.0](#apache-license-20) for all other uses

## Business Source License 1.1

Terms: https://mariadb.com/bsl11/

Licensed Work: Task Master AI
Additional Use Grant: You may use Task Master AI to create and commercialize your own projects and products.

Change Date: 2025-03-30
Change License: None

The Licensed Work is subject to the Business Source License 1.1. If you are interested in using the Licensed Work in a way that competes directly with Task Master, please contact the licensors.

### Licensor

- Eyal Toledano (GitHub: @eyaltoledano)
- Ralph (GitHub: @Crunchyman-ralph)

### Commercial Use Restrictions

This license explicitly restricts certain commercial uses of Task Master AI to the Licensors listed above. Restricted commercial uses include:

1. Creating commercial products or services that directly compete with Task Master AI
2. Selling Task Master AI itself as a service
3. Offering Task Master AI's functionality as a commercial managed service
4. Reselling or redistributing Task Master AI for a fee

### Explicitly Permitted Uses

The following uses are explicitly allowed under this license:

1. Using Task Master AI to create and commercialize your own projects
2. Using Task Master AI in commercial environments for internal development
3. Building and selling products or services that were created using Task Master AI
4. Using Task Master AI for commercial development as long as you're not selling Task Master AI itself

### Additional Terms

1. The right to commercialize Task Master AI itself is exclusively reserved for the Licensors
2. No party may create commercial products that directly compete with Task Master AI without explicit written permission
3. Forks of this repository are subject to the same restrictions regarding direct competition
4. Contributors agree that their contributions will be subject to this same dual licensing structure

## Apache License 2.0

For all uses other than those restricted above. See [APACHE-LICENSE](./APACHE-LICENSE) for the full license text.

### Permitted Use Definition

You may use Task Master AI for any purpose, including commercial purposes, as long as you are not:

1. Creating a direct competitor to Task Master AI
2. Selling Task Master AI itself as a service
3. Redistributing Task Master AI for a fee

### Requirements for Use

1. You must include appropriate copyright notices
2. You must state significant changes made to the software
3. You must preserve all license notices

## Questions and Commercial Licensing

For questions about licensing or to inquire about commercial use that may compete with Task Master, please contact:

- Eyal Toledano (GitHub: @eyaltoledano)
- Ralph (GitHub: @Crunchyman-ralph)

## Examples

### ✅ Allowed Uses

- Using Task Master to create a commercial SaaS product
- Using Task Master in your company for development
- Creating and selling products that were built using Task Master
- Using Task Master to generate code for commercial projects
- Offering consulting services where you use Task Master

### ❌ Restricted Uses

- Creating a competing AI task management tool
- Selling access to Task Master as a service
- Creating a hosted version of Task Master
- Reselling Task Master's functionality
487 README.md
@@ -1,9 +1,32 @@
# Task Master

[CI](https://github.com/eyaltoledano/claude-task-master/actions/workflows/ci.yml)
[License](LICENSE)
[npm](https://badge.fury.io/js/task-master-ai)

### by [@eyaltoledano](https://x.com/eyaltoledano)

A task management system for AI-driven development with Claude, designed to work seamlessly with Cursor AI.

## Licensing

Task Master is licensed under the MIT License with Commons Clause. This means you can:

✅ **Allowed**:

- Use Task Master for any purpose (personal, commercial, academic)
- Modify the code
- Distribute copies
- Create and sell products built using Task Master

❌ **Not Allowed**:

- Sell Task Master itself
- Offer Task Master as a hosted service
- Create competing products based on Task Master

See the [LICENSE](LICENSE) file for the complete license text.

## Requirements

- Node.js 14.0.0 or higher
@@ -362,466 +385,30 @@ task-master show 1.2
task-master update --from=<id> --prompt="<prompt>"
```

### Generate Task Files
### Update a Specific Task

```bash
# Generate individual task files from tasks.json
task-master generate
# Update a single task by ID with new information
task-master update-task --id=<id> --prompt="<prompt>"

# Use research-backed updates with Perplexity AI
task-master update-task --id=<id> --prompt="<prompt>" --research
```

### Set Task Status
### Update a Subtask

```bash
# Set status of a single task
task-master set-status --id=<id> --status=<status>
# Append additional information to a specific subtask
task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>"

# Set status for multiple tasks
task-master set-status --id=1,2,3 --status=<status>
# Example: Add details about API rate limiting to subtask 2 of task 5
task-master update-subtask --id=5.2 --prompt="Add rate limiting of 100 requests per minute"

# Set status for subtasks
task-master set-status --id=1.1,1.2 --status=<status>
# Use research-backed updates with Perplexity AI
task-master update-subtask --id=<parentId.subtaskId> --prompt="<prompt>" --research
```

When marking a task as "done", all of its subtasks will automatically be marked as "done" as well.

### Expand Tasks

```bash
# Expand a specific task with subtasks
task-master expand --id=<id> --num=<number>

# Expand with additional context
task-master expand --id=<id> --prompt="<context>"

# Expand all pending tasks
task-master expand --all

# Force regeneration of subtasks for tasks that already have them
task-master expand --all --force

# Research-backed subtask generation for a specific task
task-master expand --id=<id> --research

# Research-backed generation for all tasks
task-master expand --all --research
```

### Clear Subtasks

```bash
# Clear subtasks from a specific task
task-master clear-subtasks --id=<id>

# Clear subtasks from multiple tasks
task-master clear-subtasks --id=1,2,3

# Clear subtasks from all tasks
task-master clear-subtasks --all
```

### Analyze Task Complexity

```bash
# Analyze complexity of all tasks
task-master analyze-complexity

# Save report to a custom location
task-master analyze-complexity --output=my-report.json

# Use a specific LLM model
task-master analyze-complexity --model=claude-3-opus-20240229

# Set a custom complexity threshold (1-10)
task-master analyze-complexity --threshold=6

# Use an alternative tasks file
task-master analyze-complexity --file=custom-tasks.json

# Use Perplexity AI for research-backed complexity analysis
task-master analyze-complexity --research
```

### View Complexity Report

```bash
# Display the task complexity analysis report
task-master complexity-report

# View a report at a custom location
task-master complexity-report --file=my-report.json
```

### Managing Task Dependencies

```bash
# Add a dependency to a task
task-master add-dependency --id=<id> --depends-on=<id>

# Remove a dependency from a task
task-master remove-dependency --id=<id> --depends-on=<id>

# Validate dependencies without fixing them
task-master validate-dependencies

# Find and fix invalid dependencies automatically
task-master fix-dependencies
```

### Add a New Task

```bash
# Add a new task using AI
task-master add-task --prompt="Description of the new task"

# Add a task with dependencies
task-master add-task --prompt="Description" --dependencies=1,2,3

# Add a task with priority
```

# Task Master
### by [@eyaltoledano](https://x.com/eyaltoledano)

A task management system for AI-driven development with Claude, designed to work seamlessly with Cursor AI.

## Requirements

- Node.js 14.0.0 or higher
- Anthropic API key (Claude API)
- Anthropic SDK version 0.39.0 or higher
- OpenAI SDK (for Perplexity API integration, optional)

## Configuration

The script can be configured through environment variables in a `.env` file at the root of the project:

### Required Configuration
- `ANTHROPIC_API_KEY`: Your Anthropic API key for Claude

### Optional Configuration
- `MODEL`: Specify which Claude model to use (default: "claude-3-7-sonnet-20250219")
- `MAX_TOKENS`: Maximum tokens for model responses (default: 4000)
- `TEMPERATURE`: Temperature for model responses (default: 0.7)
- `PERPLEXITY_API_KEY`: Your Perplexity API key for research-backed subtask generation
- `PERPLEXITY_MODEL`: Specify which Perplexity model to use (default: "sonar-medium-online")
- `DEBUG`: Enable debug logging (default: false)
- `LOG_LEVEL`: Log level - debug, info, warn, error (default: info)
- `DEFAULT_SUBTASKS`: Default number of subtasks when expanding (default: 3)
- `DEFAULT_PRIORITY`: Default priority for generated tasks (default: medium)
- `PROJECT_NAME`: Override default project name in tasks.json
- `PROJECT_VERSION`: Override default version in tasks.json

## Installation

```bash
# Install globally
npm install -g task-master-ai

# OR install locally within your project
npm install task-master-ai
```

### Initialize a new project

```bash
# If installed globally
task-master init

# If installed locally
npx task-master-init
```

This will prompt you for project details and set up a new project with the necessary files and structure.

### Important Notes

1. This package uses ES modules. Your package.json should include `"type": "module"` (see the fragment below).
2. The Anthropic SDK version should be 0.39.0 or higher.
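For note 1, the relevant `package.json` fragment is just (a minimal sketch; the rest of the file is omitted):

```json
{
	"type": "module"
}
```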
## Quick Start with Global Commands

After installing the package globally, you can use these CLI commands from any directory:

```bash
# Initialize a new project
task-master init

# Parse a PRD and generate tasks
task-master parse-prd your-prd.txt

# List all tasks
task-master list

# Show the next task to work on
task-master next

# Generate task files
task-master generate
```

## Troubleshooting

### If `task-master init` doesn't respond:

Try running it with Node directly:

```bash
node node_modules/claude-task-master/scripts/init.js
```

Or clone the repository and run:

```bash
git clone https://github.com/eyaltoledano/claude-task-master.git
cd claude-task-master
node scripts/init.js
```

## Task Structure

Tasks in tasks.json have the following structure (a minimal example follows the list):

- `id`: Unique identifier for the task (Example: `1`)
- `title`: Brief, descriptive title of the task (Example: `"Initialize Repo"`)
- `description`: Concise description of what the task involves (Example: `"Create a new repository, set up initial structure."`)
- `status`: Current state of the task (Example: `"pending"`, `"done"`, `"deferred"`)
- `dependencies`: IDs of tasks that must be completed before this task (Example: `[1, 2]`)
  - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending)
  - This helps quickly identify which prerequisite tasks are blocking work
- `priority`: Importance level of the task (Example: `"high"`, `"medium"`, `"low"`)
- `details`: In-depth implementation instructions (Example: `"Use GitHub client ID/secret, handle callback, set session token."`)
- `testStrategy`: Verification approach (Example: `"Deploy and call endpoint to confirm 'Hello World' response."`)
- `subtasks`: List of smaller, more specific tasks that make up the main task (Example: `[{"id": 1, "title": "Configure OAuth", ...}]`)
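A minimal illustrative entry assembling those fields (values are taken from the examples above; the exact subtask shape is abbreviated and assumed):

```json
{
	"id": 1,
	"title": "Initialize Repo",
	"description": "Create a new repository, set up initial structure.",
	"status": "pending",
	"dependencies": [],
	"priority": "high",
	"details": "Use GitHub client ID/secret, handle callback, set session token.",
	"testStrategy": "Deploy and call endpoint to confirm 'Hello World' response.",
	"subtasks": [{ "id": 1, "title": "Configure OAuth", "status": "pending" }]
}
```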
## Integrating with Cursor AI

Claude Task Master is designed to work seamlessly with [Cursor AI](https://www.cursor.so/), providing a structured workflow for AI-driven development.

### Setup with Cursor

1. After initializing your project, open it in Cursor
2. The `.cursor/rules/dev_workflow.mdc` file is automatically loaded by Cursor, providing the AI with knowledge about the task management system
3. Place your PRD document in the `scripts/` directory (e.g., `scripts/prd.txt`)
4. Open Cursor's AI chat and switch to Agent mode

### Initial Task Generation

In Cursor's AI chat, instruct the agent to generate tasks from your PRD:

```
Please use the task-master parse-prd command to generate tasks from my PRD. The PRD is located at scripts/prd.txt.
```

The agent will execute:

```bash
task-master parse-prd scripts/prd.txt
```

This will:

- Parse your PRD document
- Generate a structured `tasks.json` file with tasks, dependencies, priorities, and test strategies
- The agent will understand this process due to the Cursor rules

### Generate Individual Task Files

Next, ask the agent to generate individual task files:

```
Please generate individual task files from tasks.json
```

The agent will execute:

```bash
task-master generate
```

This creates individual task files in the `tasks/` directory (e.g., `task_001.txt`, `task_002.txt`), making it easier to reference specific tasks.

## AI-Driven Development Workflow

The Cursor agent is pre-configured (via the rules file) to follow this workflow:

### 1. Task Discovery and Selection

Ask the agent to list available tasks:

```
What tasks are available to work on next?
```

The agent will:

- Run `task-master list` to see all tasks
- Run `task-master next` to determine the next task to work on
- Analyze dependencies to determine which tasks are ready to be worked on
- Prioritize tasks based on priority level and ID order
- Suggest the next task(s) to implement

### 2. Task Implementation

When implementing a task, the agent will:

- Reference the task's details section for implementation specifics
- Consider dependencies on previous tasks
- Follow the project's coding standards
- Create appropriate tests based on the task's testStrategy

You can ask:

```
Let's implement task 3. What does it involve?
```

### 3. Task Verification

Before marking a task as complete, verify it according to:

- The task's specified testStrategy
- Any automated tests in the codebase
- Manual verification if required

### 4. Task Completion

When a task is completed, tell the agent:

```
Task 3 is now complete. Please update its status.
```

The agent will execute:

```bash
task-master set-status --id=3 --status=done
```

### 5. Handling Implementation Drift

If during implementation, you discover that:

- The current approach differs significantly from what was planned
- Future tasks need to be modified due to current implementation choices
- New dependencies or requirements have emerged

Tell the agent:

```
We've changed our approach. We're now using Express instead of Fastify. Please update all future tasks to reflect this change.
```

The agent will execute:

```bash
task-master update --from=4 --prompt="Now we are using Express instead of Fastify."
```

This will rewrite or re-scope subsequent tasks in tasks.json while preserving completed work.

### 6. Breaking Down Complex Tasks

For complex tasks that need more granularity:

```
Task 5 seems complex. Can you break it down into subtasks?
```

The agent will execute:

```bash
task-master expand --id=5 --num=3
```

You can provide additional context:

```
Please break down task 5 with a focus on security considerations.
```

The agent will execute:

```bash
task-master expand --id=5 --prompt="Focus on security aspects"
```

You can also expand all pending tasks:

```
Please break down all pending tasks into subtasks.
```

The agent will execute:

```bash
task-master expand --all
```

For research-backed subtask generation using Perplexity AI:

```
Please break down task 5 using research-backed generation.
```

The agent will execute:

```bash
task-master expand --id=5 --research
```

## Command Reference

Here's a comprehensive reference of all available commands:

### Parse PRD

```bash
# Parse a PRD file and generate tasks
task-master parse-prd <prd-file.txt>

# Limit the number of tasks generated
task-master parse-prd <prd-file.txt> --num-tasks=10
```

### List Tasks

```bash
# List all tasks
task-master list

# List tasks with a specific status
task-master list --status=<status>

# List tasks with subtasks
task-master list --with-subtasks

# List tasks with a specific status and include subtasks
task-master list --status=<status> --with-subtasks
```

### Show Next Task

```bash
# Show the next task to work on based on dependencies and status
task-master next
```

### Show Specific Task

```bash
# Show details of a specific task
task-master show <id>
# or
task-master show --id=<id>

# View a specific subtask (e.g., subtask 2 of task 1)
task-master show 1.2
```

### Update Tasks

```bash
# Update tasks from a specific ID and provide context
task-master update --from=<id> --prompt="<prompt>"
```

Unlike the `update-task` command which replaces task information, the `update-subtask` command _appends_ new information to the existing subtask details, marking it with a timestamp. This is useful for iteratively enhancing subtasks while preserving the original content.
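As a sketch of that append behavior (the subtask ID and prompts are invented for illustration):

```bash
# First pass: record an initial implementation note (stored with a timestamp)
task-master update-subtask --id=5.2 --prompt="Chose token-bucket rate limiting"

# Later pass: append a follow-up note; the earlier details are preserved
task-master update-subtask --id=5.2 --prompt="Bucket size 100, refill 10 requests/sec"
```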
### Generate Task Files
@@ -21,9 +21,11 @@ In an AI-driven development process—particularly with tools like [Cursor](http
The script can be configured through environment variables in a `.env` file at the root of the project:

### Required Configuration

- `ANTHROPIC_API_KEY`: Your Anthropic API key for Claude

### Optional Configuration

- `MODEL`: Specify which Claude model to use (default: "claude-3-7-sonnet-20250219")
- `MAX_TOKENS`: Maximum tokens for model responses (default: 4000)
- `TEMPERATURE`: Temperature for model responses (default: 0.7)
@@ -38,9 +40,10 @@ The script can be configured through environment variables in a `.env` file at t

## How It Works

1. **`tasks.json`**:
   - A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
   - The `meta` field can store additional info like the project's name, version, or reference to the PRD.
1. **`tasks.json`**:

   - A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
   - The `meta` field can store additional info like the project's name, version, or reference to the PRD.
   - Tasks can have `subtasks` for more detailed implementation steps.
   - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.

@@ -50,7 +53,7 @@ The script can be configured through environment variables in a `.env` file at t
```bash
# If installed globally
task-master [command] [options]


# If using locally within the project
node scripts/dev.js [command] [options]
```
@@ -111,6 +114,7 @@ task-master update --file=custom-tasks.json --from=5 --prompt="Change database f
```

Notes:

- The `--prompt` parameter is required and should explain the changes or new context
- Only tasks that aren't marked as 'done' will be updated
- Tasks with ID >= the specified --from value will be updated
@@ -134,6 +138,7 @@ task-master set-status --id=1,2,3 --status=done
```

Notes:

- When marking a parent task as "done", all of its subtasks will automatically be marked as "done" as well
- Common status values are 'done', 'pending', and 'deferred', but any string is accepted
- You can specify multiple task IDs by separating them with commas
@@ -183,6 +188,7 @@ task-master clear-subtasks --all
```

Notes:

- After clearing subtasks, task files are automatically regenerated
- This is useful when you want to regenerate subtasks with a different approach
- Can be combined with the `expand` command to immediately generate new subtasks
@@ -198,6 +204,7 @@ The script integrates with two AI services:
The Perplexity integration uses the OpenAI client to connect to Perplexity's API, which provides enhanced research capabilities for generating more informed subtasks. If the Perplexity API is unavailable or encounters an error, the script will automatically fall back to using Anthropic's Claude.

To use the Perplexity integration:

1. Obtain a Perplexity API key
2. Add `PERPLEXITY_API_KEY` to your `.env` file
3. Optionally specify `PERPLEXITY_MODEL` in your `.env` file (default: "sonar-medium-online")
@@ -206,6 +213,7 @@ To use the Perplexity integration:
## Logging

The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:

- `debug`: Detailed information, typically useful for troubleshooting
- `info`: Confirmation that things are working as expected (default)
- `warn`: Warning messages that don't prevent execution
@@ -228,17 +236,20 @@ task-master remove-dependency --id=<id> --depends-on=<id>
These commands:

1. **Allow precise dependency management**:

   - Add dependencies between tasks with automatic validation
   - Remove dependencies when they're no longer needed
   - Update task files automatically after changes

2. **Include validation checks**:

   - Prevent circular dependencies (a task depending on itself)
   - Prevent duplicate dependencies
   - Verify that both tasks exist before adding/removing dependencies
   - Check if dependencies exist before attempting to remove them

3. **Provide clear feedback**:

   - Success messages confirm when dependencies are added/removed
   - Error messages explain why operations failed (if applicable)

@@ -263,6 +274,7 @@ task-master validate-dependencies --file=custom-tasks.json
```

This command:

- Scans all tasks and subtasks for non-existent dependencies
- Identifies potential self-dependencies (tasks referencing themselves)
- Reports all found issues without modifying files
@@ -284,6 +296,7 @@ task-master fix-dependencies --file=custom-tasks.json
```

This command:

1. **Validates all dependencies** across tasks and subtasks
2. **Automatically removes**:
   - References to non-existent tasks and subtasks
@@ -321,6 +334,7 @@ task-master analyze-complexity --research
```

Notes:

- The command uses Claude to analyze each task's complexity (or Perplexity with --research flag)
- Tasks are scored on a scale of 1-10
- Each task receives a recommended number of subtasks based on DEFAULT_SUBTASKS configuration
@@ -345,33 +359,35 @@ task-master expand --id=8 --num=5 --prompt="Custom prompt"
```

When a complexity report exists:

- The `expand` command will use the recommended subtask count from the report (unless overridden)
- It will use the tailored expansion prompt from the report (unless a custom prompt is provided)
- When using `--all`, tasks are sorted by complexity score (highest first)
- The `--research` flag is preserved from the complexity analysis to expansion

The output report structure is:

```json
{
	"meta": {
		"generatedAt": "2023-06-15T12:34:56.789Z",
		"tasksAnalyzed": 20,
		"thresholdScore": 5,
		"projectName": "Your Project Name",
		"usedResearch": true
	},
	"complexityAnalysis": [
		{
			"taskId": 8,
			"taskTitle": "Develop Implementation Drift Handling",
			"complexityScore": 9.5,
			"recommendedSubtasks": 6,
			"expansionPrompt": "Create subtasks that handle detecting...",
			"reasoning": "This task requires sophisticated logic...",
			"expansionCommand": "task-master expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
		},
		// More tasks sorted by complexity score (highest first)
	]
	"meta": {
		"generatedAt": "2023-06-15T12:34:56.789Z",
		"tasksAnalyzed": 20,
		"thresholdScore": 5,
		"projectName": "Your Project Name",
		"usedResearch": true
	},
	"complexityAnalysis": [
		{
			"taskId": 8,
			"taskTitle": "Develop Implementation Drift Handling",
			"complexityScore": 9.5,
			"recommendedSubtasks": 6,
			"expansionPrompt": "Create subtasks that handle detecting...",
			"reasoning": "This task requires sophisticated logic...",
			"expansionCommand": "task-master expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
		}
		// More tasks sorted by complexity score (highest first)
	]
}
```

@@ -438,4 +454,4 @@ This command:
- Commands for working with subtasks
- For subtasks, provides a link to view the parent task

This command is particularly useful when you need to examine a specific task in detail before implementing it or when you want to check the status and details of a particular task.
This command is particularly useful when you need to examine a specific task in detail before implementing it or when you want to check the status and details of a particular task.
@@ -20,11 +20,11 @@ const args = process.argv.slice(2);

// Spawn the init script with all arguments
const child = spawn('node', [initScriptPath, ...args], {
  stdio: 'inherit',
  cwd: process.cwd()
	stdio: 'inherit',
	cwd: process.cwd()
});

// Handle exit
child.on('close', (code) => {
  process.exit(code);
});
	process.exit(code);
});
@@ -1,5 +1,20 @@
#!/usr/bin/env node

/**
 * Task Master
 * Copyright (c) 2025 Eyal Toledano, Ralph Khreish
 *
 * This software is licensed under the MIT License with Commons Clause.
 * You may use this software for any purpose, including commercial applications,
 * and modify and redistribute it freely, subject to the following restrictions:
 *
 * 1. You may not sell this software or offer it as a service.
 * 2. The origin of this software must not be misrepresented.
 * 3. Altered source versions must be plainly marked as such.
 *
 * For the full license text, see the LICENSE file in the root directory.
 */

/**
 * Claude Task Master CLI
 * Main entry point for globally installed package
@@ -13,6 +28,7 @@ import { Command } from 'commander';
import { displayHelp, displayBanner } from '../scripts/modules/ui.js';
import { registerCommands } from '../scripts/modules/commands.js';
import { detectCamelCaseFlags } from '../scripts/modules/utils.js';
import chalk from 'chalk';

const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
@@ -28,30 +44,36 @@ const initScriptPath = resolve(__dirname, '../scripts/init.js');

// Helper function to run dev.js with arguments
function runDevScript(args) {
	// Debug: Show the transformed arguments when DEBUG=1 is set
	if (process.env.DEBUG === '1') {
		console.error('\nDEBUG - CLI Wrapper Analysis:');
		console.error('- Original command: ' + process.argv.join(' '));
		console.error('- Transformed args: ' + args.join(' '));
		console.error('- dev.js will receive: node ' + devScriptPath + ' ' + args.join(' ') + '\n');
	}

	// For testing: If TEST_MODE is set, just print args and exit
	if (process.env.TEST_MODE === '1') {
		console.log('Would execute:');
		console.log(`node ${devScriptPath} ${args.join(' ')}`);
		process.exit(0);
		return;
	}

	const child = spawn('node', [devScriptPath, ...args], {
		stdio: 'inherit',
		cwd: process.cwd()
	});

	child.on('close', (code) => {
		process.exit(code);
	});
	// Debug: Show the transformed arguments when DEBUG=1 is set
	if (process.env.DEBUG === '1') {
		console.error('\nDEBUG - CLI Wrapper Analysis:');
		console.error('- Original command: ' + process.argv.join(' '));
		console.error('- Transformed args: ' + args.join(' '));
		console.error(
			'- dev.js will receive: node ' +
				devScriptPath +
				' ' +
				args.join(' ') +
				'\n'
		);
	}

	// For testing: If TEST_MODE is set, just print args and exit
	if (process.env.TEST_MODE === '1') {
		console.log('Would execute:');
		console.log(`node ${devScriptPath} ${args.join(' ')}`);
		process.exit(0);
		return;
	}

	const child = spawn('node', [devScriptPath, ...args], {
		stdio: 'inherit',
		cwd: process.cwd()
	});

	child.on('close', (code) => {
		process.exit(code);
	});
}

// Helper function to detect camelCase and convert to kebab-case
@@ -63,245 +85,296 @@ const toKebabCase = (str) => str.replace(/([A-Z])/g, '-$1').toLowerCase();
 * @returns {Function} Wrapper action function
 */
function createDevScriptAction(commandName) {
	return (options, cmd) => {
		// Check for camelCase flags and error out with helpful message
		const camelCaseFlags = detectCamelCaseFlags(process.argv);

		// If camelCase flags were found, show error and exit
		if (camelCaseFlags.length > 0) {
			console.error('\nError: Please use kebab-case for CLI flags:');
			camelCaseFlags.forEach(flag => {
				console.error(`  Instead of: --${flag.original}`);
				console.error(`  Use: --${flag.kebabCase}`);
			});
			console.error('\nExample: task-master parse-prd --num-tasks=5 instead of --numTasks=5\n');
			process.exit(1);
		}

		// Since we've ensured no camelCase flags, we can now just:
		// 1. Start with the command name
		const args = [commandName];

		// 3. Get positional arguments and explicit flags from the command line
		const commandArgs = [];
		const positionals = new Set(); // Track positional args we've seen

		// Find the command in raw process.argv to extract args
		const commandIndex = process.argv.indexOf(commandName);
		if (commandIndex !== -1) {
			// Process all args after the command name
			for (let i = commandIndex + 1; i < process.argv.length; i++) {
				const arg = process.argv[i];

				if (arg.startsWith('--')) {
					// It's a flag - pass through as is
					commandArgs.push(arg);
					// Skip the next arg if this is a flag with a value (not --flag=value format)
					if (!arg.includes('=') &&
						i + 1 < process.argv.length &&
						!process.argv[i+1].startsWith('--')) {
						commandArgs.push(process.argv[++i]);
					}
				} else if (!positionals.has(arg)) {
					// It's a positional argument we haven't seen
					commandArgs.push(arg);
					positionals.add(arg);
				}
			}
		}

		// Add all command line args we collected
		args.push(...commandArgs);

		// 4. Add default options from Commander if not specified on command line
		// Track which options we've seen on the command line
		const userOptions = new Set();
		for (const arg of commandArgs) {
			if (arg.startsWith('--')) {
				// Extract option name (without -- and value)
				const name = arg.split('=')[0].slice(2);
				userOptions.add(name);

				// Add the kebab-case version too, to prevent duplicates
				const kebabName = name.replace(/([A-Z])/g, '-$1').toLowerCase();
				userOptions.add(kebabName);

				// Add the camelCase version as well
				const camelName = kebabName.replace(/-([a-z])/g, (_, letter) => letter.toUpperCase());
				userOptions.add(camelName);
			}
		}

		// Add Commander-provided defaults for options not specified by user
		Object.entries(options).forEach(([key, value]) => {
			// Debug output to see what keys we're getting
			if (process.env.DEBUG === '1') {
				console.error(`DEBUG - Processing option: ${key} = ${value}`);
			}
	return (options, cmd) => {
		// Check for camelCase flags and error out with helpful message
		const camelCaseFlags = detectCamelCaseFlags(process.argv);

			// Special case for numTasks > num-tasks (a known problem case)
			if (key === 'numTasks') {
				if (process.env.DEBUG === '1') {
					console.error('DEBUG - Converting numTasks to num-tasks');
				}
				if (!userOptions.has('num-tasks') && !userOptions.has('numTasks')) {
					args.push(`--num-tasks=${value}`);
				}
				return;
			}

			// Skip built-in Commander properties and options the user provided
			if (['parent', 'commands', 'options', 'rawArgs'].includes(key) || userOptions.has(key)) {
				return;
			}

			// Also check the kebab-case version of this key
			const kebabKey = key.replace(/([A-Z])/g, '-$1').toLowerCase();
			if (userOptions.has(kebabKey)) {
				return;
			}

			// Add default values, using kebab-case for the parameter name
			if (value !== undefined) {
				if (typeof value === 'boolean') {
					if (value === true) {
						args.push(`--${kebabKey}`);
					} else if (value === false && key === 'generate') {
						args.push('--no-generate');
					}
				} else {
					// Always use kebab-case for option names
					args.push(`--${kebabKey}=${value}`);
				}
			}
		});

		// Special handling for parent parameter (uses -p)
		if (options.parent && !args.includes('-p') && !userOptions.has('parent')) {
			args.push('-p', options.parent);
		}

		// Debug output for troubleshooting
		if (process.env.DEBUG === '1') {
			console.error('DEBUG - Command args:', commandArgs);
			console.error('DEBUG - User options:', Array.from(userOptions));
			console.error('DEBUG - Commander options:', options);
			console.error('DEBUG - Final args:', args);
		}

		// Run the script with our processed args
		runDevScript(args);
	};
		// If camelCase flags were found, show error and exit
		if (camelCaseFlags.length > 0) {
			console.error('\nError: Please use kebab-case for CLI flags:');
			camelCaseFlags.forEach((flag) => {
				console.error(`  Instead of: --${flag.original}`);
				console.error(`  Use: --${flag.kebabCase}`);
			});
			console.error(
				'\nExample: task-master parse-prd --num-tasks=5 instead of --numTasks=5\n'
			);
			process.exit(1);
		}

		// Since we've ensured no camelCase flags, we can now just:
		// 1. Start with the command name
		const args = [commandName];

		// 3. Get positional arguments and explicit flags from the command line
		const commandArgs = [];
		const positionals = new Set(); // Track positional args we've seen

		// Find the command in raw process.argv to extract args
		const commandIndex = process.argv.indexOf(commandName);
		if (commandIndex !== -1) {
			// Process all args after the command name
			for (let i = commandIndex + 1; i < process.argv.length; i++) {
				const arg = process.argv[i];

				if (arg.startsWith('--')) {
					// It's a flag - pass through as is
					commandArgs.push(arg);
					// Skip the next arg if this is a flag with a value (not --flag=value format)
					if (
						!arg.includes('=') &&
						i + 1 < process.argv.length &&
						!process.argv[i + 1].startsWith('--')
					) {
						commandArgs.push(process.argv[++i]);
					}
				} else if (!positionals.has(arg)) {
					// It's a positional argument we haven't seen
					commandArgs.push(arg);
					positionals.add(arg);
				}
			}
		}

		// Add all command line args we collected
		args.push(...commandArgs);

		// 4. Add default options from Commander if not specified on command line
		// Track which options we've seen on the command line
		const userOptions = new Set();
		for (const arg of commandArgs) {
			if (arg.startsWith('--')) {
				// Extract option name (without -- and value)
				const name = arg.split('=')[0].slice(2);
				userOptions.add(name);

				// Add the kebab-case version too, to prevent duplicates
				const kebabName = name.replace(/([A-Z])/g, '-$1').toLowerCase();
				userOptions.add(kebabName);

				// Add the camelCase version as well
				const camelName = kebabName.replace(/-([a-z])/g, (_, letter) =>
					letter.toUpperCase()
				);
				userOptions.add(camelName);
			}
		}

		// Add Commander-provided defaults for options not specified by user
		Object.entries(options).forEach(([key, value]) => {
			// Debug output to see what keys we're getting
			if (process.env.DEBUG === '1') {
				console.error(`DEBUG - Processing option: ${key} = ${value}`);
			}

			// Special case for numTasks > num-tasks (a known problem case)
			if (key === 'numTasks') {
				if (process.env.DEBUG === '1') {
					console.error('DEBUG - Converting numTasks to num-tasks');
				}
				if (!userOptions.has('num-tasks') && !userOptions.has('numTasks')) {
					args.push(`--num-tasks=${value}`);
				}
				return;
			}

			// Skip built-in Commander properties and options the user provided
			if (
				['parent', 'commands', 'options', 'rawArgs'].includes(key) ||
				userOptions.has(key)
			) {
				return;
			}

			// Also check the kebab-case version of this key
			const kebabKey = key.replace(/([A-Z])/g, '-$1').toLowerCase();
			if (userOptions.has(kebabKey)) {
				return;
			}

			// Add default values, using kebab-case for the parameter name
			if (value !== undefined) {
				if (typeof value === 'boolean') {
					if (value === true) {
						args.push(`--${kebabKey}`);
					} else if (value === false && key === 'generate') {
						args.push('--skip-generate');
					}
				} else {
					// Always use kebab-case for option names
					args.push(`--${kebabKey}=${value}`);
				}
			}
		});

		// Special handling for parent parameter (uses -p)
		if (options.parent && !args.includes('-p') && !userOptions.has('parent')) {
			args.push('-p', options.parent);
		}

		// Debug output for troubleshooting
		if (process.env.DEBUG === '1') {
			console.error('DEBUG - Command args:', commandArgs);
			console.error('DEBUG - User options:', Array.from(userOptions));
			console.error('DEBUG - Commander options:', options);
			console.error('DEBUG - Final args:', args);
		}

		// Run the script with our processed args
		runDevScript(args);
	};
}

// Special case for the 'init' command which uses a different script
function registerInitCommand(program) {
	program
		.command('init')
		.description('Initialize a new project')
		.option('-y, --yes', 'Skip prompts and use default values')
		.option('-n, --name <name>', 'Project name')
		.option('-d, --description <description>', 'Project description')
		.option('-v, --version <version>', 'Project version')
		.option('-a, --author <author>', 'Author name')
		.option('--skip-install', 'Skip installing dependencies')
		.option('--dry-run', 'Show what would be done without making changes')
		.action((options) => {
			// Pass through any options to the init script
			const args = ['--yes', 'name', 'description', 'version', 'author', 'skip-install', 'dry-run']
				.filter(opt => options[opt])
				.map(opt => {
					if (opt === 'yes' || opt === 'skip-install' || opt === 'dry-run') {
						return `--${opt}`;
					}
					return `--${opt}=${options[opt]}`;
				});

			const child = spawn('node', [initScriptPath, ...args], {
				stdio: 'inherit',
				cwd: process.cwd()
			});

			child.on('close', (code) => {
				process.exit(code);
			});
		});
	program
		.command('init')
		.description('Initialize a new project')
		.option('-y, --yes', 'Skip prompts and use default values')
		.option('-n, --name <name>', 'Project name')
		.option('-d, --description <description>', 'Project description')
		.option('-v, --version <version>', 'Project version')
		.option('-a, --author <author>', 'Author name')
		.option('--skip-install', 'Skip installing dependencies')
		.option('--dry-run', 'Show what would be done without making changes')
		.action((options) => {
			// Pass through any options to the init script
			const args = [
				'--yes',
				'name',
				'description',
				'version',
				'author',
				'skip-install',
				'dry-run'
			]
				.filter((opt) => options[opt])
				.map((opt) => {
					if (opt === 'yes' || opt === 'skip-install' || opt === 'dry-run') {
						return `--${opt}`;
					}
					return `--${opt}=${options[opt]}`;
				});

			const child = spawn('node', [initScriptPath, ...args], {
				stdio: 'inherit',
				cwd: process.cwd()
			});

			child.on('close', (code) => {
				process.exit(code);
			});
		});
}

// Set up the command-line interface
const program = new Command();

program
	.name('task-master')
	.description('Claude Task Master CLI')
	.version(version)
	.addHelpText('afterAll', () => {
		// Use the same help display function as dev.js for consistency
		displayHelp();
		return ''; // Return empty string to prevent commander's default help
	});
	.name('task-master')
	.description('Claude Task Master CLI')
	.version(version)
	.addHelpText('afterAll', () => {
		// Use the same help display function as dev.js for consistency
		displayHelp();
		return ''; // Return empty string to prevent commander's default help
	});

// Add custom help option to directly call our help display
program.helpOption('-h, --help', 'Display help information');
program.on('--help', () => {
	displayHelp();
	displayHelp();
});

// Add special case commands
registerInitCommand(program);

program
	.command('dev')
	.description('Run the dev.js script')
	.allowUnknownOption(true)
	.action(() => {
		const args = process.argv.slice(process.argv.indexOf('dev') + 1);
		runDevScript(args);
	});
	.command('dev')
	.description('Run the dev.js script')
	.action(() => {
		const args = process.argv.slice(process.argv.indexOf('dev') + 1);
		runDevScript(args);
	});

// Use a temporary Command instance to get all command definitions
const tempProgram = new Command();
registerCommands(tempProgram);

// For each command in the temp instance, add a modified version to our actual program
tempProgram.commands.forEach(cmd => {
	if (['init', 'dev'].includes(cmd.name())) {
		// Skip commands we've already defined specially
		return;
	}

	// Create a new command with the same name and description
	const newCmd = program
		.command(cmd.name())
		.description(cmd.description())
		.allowUnknownOption(); // Allow any options, including camelCase ones

	// Copy all options
	cmd.options.forEach(opt => {
		newCmd.option(
			opt.flags,
			opt.description,
			opt.defaultValue
		);
	});

	// Set the action to proxy to dev.js
	newCmd.action(createDevScriptAction(cmd.name()));
tempProgram.commands.forEach((cmd) => {
	if (['init', 'dev'].includes(cmd.name())) {
		// Skip commands we've already defined specially
		return;
	}

	// Create a new command with the same name and description
	const newCmd = program.command(cmd.name()).description(cmd.description());

	// Copy all options
	cmd.options.forEach((opt) => {
		newCmd.option(opt.flags, opt.description, opt.defaultValue);
	});

	// Set the action to proxy to dev.js
	newCmd.action(createDevScriptAction(cmd.name()));
});

// Parse the command line arguments
program.parse(process.argv);

// Add global error handling for unknown commands and options
process.on('uncaughtException', (err) => {
	// Check if this is a commander.js unknown option error
	if (err.code === 'commander.unknownOption') {
		const option = err.message.match(/'([^']+)'/)?.[1];
		const commandArg = process.argv.find(
			(arg) =>
				!arg.startsWith('-') &&
				arg !== 'task-master' &&
				!arg.includes('/') &&
				arg !== 'node'
		);
		const command = commandArg || 'unknown';

		console.error(chalk.red(`Error: Unknown option '${option}'`));
		console.error(
			chalk.yellow(
				`Run 'task-master ${command} --help' to see available options for this command`
			)
		);
		process.exit(1);
	}

	// Check if this is a commander.js unknown command error
	if (err.code === 'commander.unknownCommand') {
		const command = err.message.match(/'([^']+)'/)?.[1];

		console.error(chalk.red(`Error: Unknown command '${command}'`));
		console.error(
			chalk.yellow(`Run 'task-master --help' to see available commands`)
		);
		process.exit(1);
	}

	// Handle other uncaught exceptions
	console.error(chalk.red(`Error: ${err.message}`));
	if (process.env.DEBUG === '1') {
		console.error(err);
	}
	process.exit(1);
});

// Show help if no command was provided (just 'task-master' with no args)
if (process.argv.length <= 2) {
	displayBanner();
	displayHelp();
	process.exit(0);
	displayBanner();
	displayHelp();
	process.exit(0);
}

// Add exports at the end of the file
if (typeof module !== 'undefined') {
	module.exports = {
		detectCamelCaseFlags
	};
}
	module.exports = {
		detectCamelCaseFlags
	};
}
269	docs/MCP_INTEGRATION.md	Normal file
@@ -0,0 +1,269 @@
# Task Master MCP Integration

This document outlines how Task Master CLI functionality is integrated with the MCP (Model Context Protocol) server to provide both CLI and programmatic API access to the same features.

## Architecture Overview

The MCP integration uses a layered approach:

1. **Core Functions** - Modules in `scripts/modules/` contain the main business logic
2. **Source Parameter** - Core functions check the `source` parameter to determine behavior
3. **Task Master Core** - `mcp-server/src/core/task-master-core.js` provides direct function imports
4. **MCP Tools** - Modules in `mcp-server/src/tools/` register the functions with the MCP server

```
┌─────────────────┐     ┌─────────────────┐
│    CLI User     │     │    MCP User     │
└────────┬────────┘     └────────┬────────┘
         │                       │
         ▼                       ▼
┌────────────────┐      ┌────────────────────┐
│  commands.js   │      │    MCP Tool API    │
└────────┬───────┘      └──────────┬─────────┘
         │                         │
         │                         │
         ▼                         ▼
┌───────────────────────────────────────────────┐
│                                               │
│     Core Modules (task-manager.js, etc.)      │
│                                               │
└───────────────────────────────────────────────┘
```

## Core Function Pattern

Core functions should follow this pattern to support both CLI and MCP use:

```javascript
/**
 * Example function with source parameter support
 * @param {any} param1 - First parameter
 * @param {any} param2 - Second parameter
 * @param {Object} options - Additional options including source
 * @returns {Object|undefined} - Returns data when source is 'mcp'
 */
function exampleFunction(param1, param2, options = {}) {
  try {
    // Skip UI for MCP
    if (options.source !== 'mcp') {
      displayBanner();
      console.log(chalk.blue('Processing operation...'));
    }

    // Do the core business logic
    const result = doSomething(param1, param2);

    // For MCP, return structured data
    if (options.source === 'mcp') {
      return {
        success: true,
        data: result
      };
    }

    // For CLI, display output
    console.log(chalk.green('Operation completed successfully!'));
  } catch (error) {
    // Handle errors based on source
    if (options.source === 'mcp') {
      return {
        success: false,
        error: error.message
      };
    }

    // CLI error handling
    console.error(chalk.red(`Error: ${error.message}`));
    process.exit(1);
  }
}
```
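
Calling the same function from both contexts then looks like this (a sketch; `exampleFunction` is the illustrative function above):

```javascript
// CLI path: shows the banner and prints output, returns undefined
exampleFunction('param-a', 'param-b');

// MCP path: no UI output; returns { success, data } or { success, error }
const result = exampleFunction('param-a', 'param-b', { source: 'mcp' });
if (!result.success) {
  // handle result.error
}
```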

## Source-Adapter Utilities

For convenience, you can use the source adapter helpers in `scripts/modules/source-adapter.js`:

```javascript
import { adaptForMcp, sourceSplitFunction } from './source-adapter.js';

// Simple adaptation - just adds source parameter support
export const simpleFunction = adaptForMcp(originalFunction);

// Split implementation - completely different code paths for CLI vs MCP
export const complexFunction = sourceSplitFunction(
  // CLI version with UI
  function (param1, param2) {
    displayBanner();
    console.log(`Processing ${param1}...`);
    // ... CLI implementation
  },
  // MCP version with structured return
  function (param1, param2, options = {}) {
    // ... MCP implementation
    return { success: true, data };
  }
);
```
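
`source-adapter.js` itself is not included in this diff. As a rough sketch of what these helpers might look like (an assumption, not the actual implementation; both are presumed to dispatch on a trailing `options.source` argument):

```javascript
// Hypothetical internals of scripts/modules/source-adapter.js

// Wraps a function so callers can pass { source: 'mcp' } as the last argument;
// the wrapped function is assumed to branch on options.source itself.
export function adaptForMcp(fn) {
  return (...args) => {
    const last = args[args.length - 1];
    const hasOptions = typeof last === 'object' && last !== null;
    const options = hasOptions ? last : {};
    const rest = hasOptions ? args.slice(0, -1) : args;
    return fn(...rest, { source: 'cli', ...options });
  };
}

// Dispatches to one of two implementations based on options.source.
export function sourceSplitFunction(cliFn, mcpFn) {
  return (...args) => {
    const last = args[args.length - 1];
    const isMcp =
      typeof last === 'object' && last !== null && last.source === 'mcp';
    return isMcp ? mcpFn(...args) : cliFn(...args);
  };
}
```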

## Adding New Features

When adding new features, follow these steps to ensure CLI and MCP compatibility:

1. **Implement Core Logic** in the appropriate module file
2. **Add Source Parameter Support** using the pattern above
3. **Add to task-master-core.js** to make it available for direct import
4. **Update Command Map** in `mcp-server/src/tools/utils.js`
5. **Create Tool Implementation** in `mcp-server/src/tools/`
6. **Register the Tool** in `mcp-server/src/tools/index.js`

### Core Function Implementation

```javascript
// In scripts/modules/task-manager.js
export async function newFeature(param1, param2, options = {}) {
  try {
    // Source-specific UI
    if (options.source !== 'mcp') {
      displayBanner();
      console.log(chalk.blue('Running new feature...'));
    }

    // Shared core logic
    const result = processFeature(param1, param2);

    // Source-specific return handling
    if (options.source === 'mcp') {
      return {
        success: true,
        data: result
      };
    }

    // CLI output
    console.log(chalk.green('Feature completed successfully!'));
    displayOutput(result);
  } catch (error) {
    // Error handling based on source
    if (options.source === 'mcp') {
      return {
        success: false,
        error: error.message
      };
    }

    console.error(chalk.red(`Error: ${error.message}`));
    process.exit(1);
  }
}
```

### Task Master Core Update

```javascript
// In mcp-server/src/core/task-master-core.js
import { newFeature } from '../../../scripts/modules/task-manager.js';

// Add to exports
export default {
  // ... existing functions

  async newFeature(args = {}, options = {}) {
    const { param1, param2 } = args;
    return executeFunction(newFeature, [param1, param2], options);
  }
};
```

### Command Map Update

```javascript
// In mcp-server/src/tools/utils.js
const commandMap = {
  // ... existing mappings
  'new-feature': 'newFeature'
};
```

### Tool Implementation

```javascript
// In mcp-server/src/tools/newFeature.js
import { z } from 'zod';
import {
  executeTaskMasterCommand,
  createContentResponse,
  createErrorResponse
} from './utils.js';

export function registerNewFeatureTool(server) {
  server.addTool({
    name: 'newFeature',
    description: 'Run the new feature',
    parameters: z.object({
      param1: z.string().describe('First parameter'),
      param2: z.number().optional().describe('Second parameter'),
      file: z.string().optional().describe('Path to the tasks file'),
      projectRoot: z.string().describe('Root directory of the project')
    }),
    execute: async (args, { log }) => {
      try {
        log.info(`Running new feature with args: ${JSON.stringify(args)}`);

        const cmdArgs = [];
        if (args.param1) cmdArgs.push(`--param1=${args.param1}`);
        if (args.param2) cmdArgs.push(`--param2=${args.param2}`);
        if (args.file) cmdArgs.push(`--file=${args.file}`);

        const projectRoot = args.projectRoot;

        // Execute the command
        const result = await executeTaskMasterCommand(
          'new-feature',
          log,
          cmdArgs,
          projectRoot
        );

        if (!result.success) {
          throw new Error(result.error);
        }

        return createContentResponse(result.stdout);
      } catch (error) {
        log.error(`Error in new feature: ${error.message}`);
        return createErrorResponse(`Error in new feature: ${error.message}`);
      }
    }
  });
}
```

### Tool Registration

```javascript
// In mcp-server/src/tools/index.js
import { registerNewFeatureTool } from './newFeature.js';

export function registerTaskMasterTools(server) {
  // ... existing registrations
  registerNewFeatureTool(server);
}
```

## Testing

Always test your MCP-compatible features with both CLI and MCP interfaces:

```bash
# Test CLI usage
node scripts/dev.js new-feature --param1=test --param2=123

# Test MCP usage
node mcp-server/tests/test-command.js newFeature
```

## Best Practices

1. **Keep Core Logic DRY** - Share as much logic as possible between CLI and MCP
2. **Structured Data for MCP** - Return clean JSON objects from MCP source functions
3. **Consistent Error Handling** - Standardize error formats for both interfaces
4. **Documentation** - Update MCP tool documentation when adding new features
5. **Testing** - Test both CLI and MCP interfaces for any new or modified feature (see the sketch below)
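
For the last point, a minimal Jest sketch (assuming the hypothetical `newFeature` above and the `{ success, data | error }` return shape; the import path and test file name are illustrative):

```javascript
// tests/new-feature.test.js - hypothetical test for the example feature above
import { newFeature } from '../scripts/modules/task-manager.js';

describe('newFeature (MCP source)', () => {
  it('returns structured data instead of printing', async () => {
    const result = await newFeature('test', 123, { source: 'mcp' });

    expect(result.success).toBe(true);
    expect(result.data).toBeDefined();
  });

  it('returns a structured error instead of exiting', async () => {
    // Assumes the core logic rejects a missing first parameter
    const result = await newFeature(undefined, 123, { source: 'mcp' });

    expect(result.success).toBe(false);
    expect(typeof result.error).toBe('string');
  });
});
```

The CLI path calls `process.exit()` on error, so it is easiest to exercise end-to-end with the `node scripts/dev.js` invocation shown above rather than in-process.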
3849	docs/fastmcp-docs.txt	Normal file
File diff suppressed because it is too large
14618	docs/mcp-js-sdk-docs.txt	Normal file
File diff suppressed because it is too large
6649	docs/mcp-protocol-repo.txt	Normal file
File diff suppressed because it is too large
1913	docs/mcp-protocol-schema-03262025.json	Normal file
File diff suppressed because it is too large
9589	docs/mcp-protocol-spec.txt	Normal file
File diff suppressed because it is too large
205	index.js
@@ -1,5 +1,20 @@
#!/usr/bin/env node

/**
 * Task Master
 * Copyright (c) 2025 Eyal Toledano, Ralph Khreish
 *
 * This software is licensed under the MIT License with Commons Clause.
 * You may use this software for any purpose, including commercial applications,
 * and modify and redistribute it freely, subject to the following restrictions:
 *
 * 1. You may not sell this software or offer it as a service.
 * 2. The origin of this software must not be misrepresented.
 * 3. Altered source versions must be plainly marked as such.
 *
 * For the full license text, see the LICENSE file in the root directory.
 */

/**
 * Claude Task Master
 * A task management system for AI-driven development with Claude
@@ -26,27 +41,27 @@ export const devScriptPath = resolve(__dirname, './scripts/dev.js');

// Export a function to initialize a new project programmatically
export const initProject = async (options = {}) => {
	const init = await import('./scripts/init.js');
	return init.initializeProject(options);
};

// Export a function to run init as a CLI command
export const runInitCLI = async () => {
	// Using spawn to ensure proper handling of stdio and process exit
	const child = spawn('node', [resolve(__dirname, './scripts/init.js')], {
		stdio: 'inherit',
		cwd: process.cwd()
	});

	return new Promise((resolve, reject) => {
		child.on('close', (code) => {
			if (code === 0) {
				resolve();
			} else {
				reject(new Error(`Init script exited with code ${code}`));
			}
		});
	});
};

// Export version information
@@ -54,81 +69,81 @@ export const version = packageJson.version;

// CLI implementation
if (import.meta.url === `file://${process.argv[1]}`) {
	const program = new Command();

	program
		.name('task-master')
		.description('Claude Task Master CLI')
		.version(version);

	program
		.command('init')
		.description('Initialize a new project')
		.action(() => {
			runInitCLI().catch((err) => {
				console.error('Init failed:', err.message);
				process.exit(1);
			});
		});

	program
		.command('dev')
		.description('Run the dev.js script')
		.allowUnknownOption(true)
		.action(() => {
			const args = process.argv.slice(process.argv.indexOf('dev') + 1);
			const child = spawn('node', [devScriptPath, ...args], {
				stdio: 'inherit',
				cwd: process.cwd()
			});

			child.on('close', (code) => {
				process.exit(code);
			});
		});

	// Add shortcuts for common dev.js commands
	program
		.command('list')
		.description('List all tasks')
		.action(() => {
			const child = spawn('node', [devScriptPath, 'list'], {
				stdio: 'inherit',
				cwd: process.cwd()
			});

			child.on('close', (code) => {
				process.exit(code);
			});
		});

	program
		.command('next')
		.description('Show the next task to work on')
		.action(() => {
			const child = spawn('node', [devScriptPath, 'next'], {
				stdio: 'inherit',
				cwd: process.cwd()
			});

			child.on('close', (code) => {
				process.exit(code);
			});
		});

	program
		.command('generate')
		.description('Generate task files')
		.action(() => {
			const child = spawn('node', [devScriptPath, 'generate'], {
				stdio: 'inherit',
				cwd: process.cwd()
			});

			child.on('close', (code) => {
				process.exit(code);
			});
		});

	program.parse(process.argv);
}
109	jest.config.js
@@ -1,55 +1,56 @@
export default {
	// Use Node.js environment for testing
	testEnvironment: 'node',

	// Automatically clear mock calls between every test
	clearMocks: true,

	// Indicates whether the coverage information should be collected while executing the test
	collectCoverage: false,

	// The directory where Jest should output its coverage files
	coverageDirectory: 'coverage',

	// A list of paths to directories that Jest should use to search for files in
	roots: ['<rootDir>/tests'],

	// The glob patterns Jest uses to detect test files
	testMatch: [
		'**/__tests__/**/*.js',
		'**/?(*.)+(spec|test).js',
		'**/tests/*.test.js'
	],

	// Transform files
	transform: {},

	// Disable transformations for node_modules
	transformIgnorePatterns: ['/node_modules/'],

	// Set moduleNameMapper for absolute paths
	moduleNameMapper: {
		'^@/(.*)$': '<rootDir>/$1'
	},

	// Setup module aliases
	moduleDirectories: ['node_modules', '<rootDir>'],

	// Configure test coverage thresholds
	coverageThreshold: {
		global: {
			branches: 80,
			functions: 80,
			lines: 80,
			statements: 80
		}
	},

	// Generate coverage report in these formats
	coverageReporters: ['text', 'lcov'],

	// Verbose output
	verbose: true,

	// Setup file
	setupFilesAfterEnv: ['<rootDir>/tests/setup.js']
};
@@ -1,8 +1,8 @@
#!/usr/bin/env node

import TaskMasterMCPServer from './src/index.js';
import dotenv from 'dotenv';
import logger from './src/logger.js';

// Load environment variables
dotenv.config();
@@ -11,25 +11,25 @@ dotenv.config();
 * Start the MCP server
 */
async function startServer() {
	const server = new TaskMasterMCPServer();

	// Handle graceful shutdown
	process.on('SIGINT', async () => {
		await server.stop();
		process.exit(0);
	});

	process.on('SIGTERM', async () => {
		await server.stop();
		process.exit(0);
	});

	try {
		await server.start();
	} catch (error) {
		logger.error(`Failed to start MCP server: ${error.message}`);
		process.exit(1);
	}
}

// Start the server
91	mcp-server/src/core/__tests__/context-manager.test.js	Normal file
@@ -0,0 +1,91 @@
import { jest } from '@jest/globals';
import { ContextManager } from '../context-manager.js';

describe('ContextManager', () => {
	let contextManager;

	beforeEach(() => {
		contextManager = new ContextManager({
			maxCacheSize: 10,
			ttl: 1000, // 1 second for testing
			maxContextSize: 1000
		});
	});

	describe('getContext', () => {
		it('should create a new context when not in cache', async () => {
			const context = await contextManager.getContext('test-id', {
				test: true
			});
			expect(context.id).toBe('test-id');
			expect(context.metadata.test).toBe(true);
			expect(contextManager.stats.misses).toBe(1);
			expect(contextManager.stats.hits).toBe(0);
		});

		it('should return cached context when available', async () => {
			// First call creates the context
			await contextManager.getContext('test-id', { test: true });

			// Second call should hit cache
			const context = await contextManager.getContext('test-id', {
				test: true
			});
			expect(context.id).toBe('test-id');
			expect(context.metadata.test).toBe(true);
			expect(contextManager.stats.hits).toBe(1);
			expect(contextManager.stats.misses).toBe(1);
		});

		it('should respect TTL settings', async () => {
			// Create context
			await contextManager.getContext('test-id', { test: true });

			// Wait for TTL to expire
			await new Promise((resolve) => setTimeout(resolve, 1100));

			// Should create new context
			await contextManager.getContext('test-id', { test: true });
			expect(contextManager.stats.misses).toBe(2);
			expect(contextManager.stats.hits).toBe(0);
		});
	});

	describe('updateContext', () => {
		it('should update existing context metadata', async () => {
			await contextManager.getContext('test-id', { initial: true });
			const updated = await contextManager.updateContext('test-id', {
				updated: true
			});

			expect(updated.metadata.initial).toBe(true);
			expect(updated.metadata.updated).toBe(true);
		});
	});

	describe('invalidateContext', () => {
		it('should remove context from cache', async () => {
			await contextManager.getContext('test-id', { test: true });
			contextManager.invalidateContext('test-id', { test: true });

			// Should be a cache miss
			await contextManager.getContext('test-id', { test: true });
			expect(contextManager.stats.invalidations).toBe(1);
			expect(contextManager.stats.misses).toBe(2);
		});
	});

	describe('getStats', () => {
		it('should return current cache statistics', async () => {
			await contextManager.getContext('test-id', { test: true });
			const stats = contextManager.getStats();

			expect(stats.hits).toBe(0);
			expect(stats.misses).toBe(1);
			expect(stats.invalidations).toBe(0);
			expect(stats.size).toBe(1);
			expect(stats.maxSize).toBe(10);
			expect(stats.ttl).toBe(1000);
		});
	});
});
171	mcp-server/src/core/context-manager.js	Normal file
@@ -0,0 +1,171 @@
/**
 * context-manager.js
 * Context and cache management for Task Master MCP Server
 */

import { FastMCP } from 'fastmcp';
import { LRUCache } from 'lru-cache';

/**
 * Configuration options for the ContextManager
 * @typedef {Object} ContextManagerConfig
 * @property {number} maxCacheSize - Maximum number of items in the cache
 * @property {number} ttl - Time to live for cached items in milliseconds
 * @property {number} maxContextSize - Maximum size of context window in tokens
 */

export class ContextManager {
	/**
	 * Create a new ContextManager instance
	 * @param {ContextManagerConfig} config - Configuration options
	 */
	constructor(config = {}) {
		this.config = {
			maxCacheSize: config.maxCacheSize || 1000,
			ttl: config.ttl || 1000 * 60 * 5, // 5 minutes default
			maxContextSize: config.maxContextSize || 4000
		};

		// Initialize LRU cache for context data
		this.cache = new LRUCache({
			max: this.config.maxCacheSize,
			ttl: this.config.ttl,
			updateAgeOnGet: true
		});

		// Cache statistics
		this.stats = {
			hits: 0,
			misses: 0,
			invalidations: 0
		};
	}

	/**
	 * Create a new context or retrieve from cache
	 * @param {string} contextId - Unique identifier for the context
	 * @param {Object} metadata - Additional metadata for the context
	 * @returns {Object} Context object with metadata
	 */
	async getContext(contextId, metadata = {}) {
		const cacheKey = this._getCacheKey(contextId, metadata);

		// Try to get from cache first
		const cached = this.cache.get(cacheKey);
		if (cached) {
			this.stats.hits++;
			return cached;
		}

		this.stats.misses++;

		// Create new context if not in cache
		const context = {
			id: contextId,
			metadata: {
				...metadata,
				created: new Date().toISOString()
			}
		};

		// Cache the new context
		this.cache.set(cacheKey, context);

		return context;
	}

	/**
	 * Update an existing context
	 * @param {string} contextId - Context identifier
	 * @param {Object} updates - Updates to apply to the context
	 * @returns {Object} Updated context
	 */
	async updateContext(contextId, updates) {
		const context = await this.getContext(contextId);

		// Apply updates to context
		Object.assign(context.metadata, updates);

		// Update cache
		const cacheKey = this._getCacheKey(contextId, context.metadata);
		this.cache.set(cacheKey, context);

		return context;
	}

	/**
	 * Invalidate a context in the cache
	 * @param {string} contextId - Context identifier
	 * @param {Object} metadata - Metadata used in the cache key
	 */
	invalidateContext(contextId, metadata = {}) {
		const cacheKey = this._getCacheKey(contextId, metadata);
		this.cache.delete(cacheKey);
		this.stats.invalidations++;
	}

	/**
	 * Get cached data associated with a specific key.
	 * Increments cache hit stats if found.
	 * @param {string} key - The cache key.
	 * @returns {any | undefined} The cached data or undefined if not found/expired.
	 */
	getCachedData(key) {
		const cached = this.cache.get(key);
		if (cached !== undefined) {
			// Check for undefined specifically, as null/false might be valid cached values
			this.stats.hits++;
			return cached;
		}
		this.stats.misses++;
		return undefined;
	}

	/**
	 * Set data in the cache with a specific key.
	 * @param {string} key - The cache key.
	 * @param {any} data - The data to cache.
	 */
	setCachedData(key, data) {
		this.cache.set(key, data);
	}

	/**
	 * Invalidate a specific cache key.
	 * Increments invalidation stats.
	 * @param {string} key - The cache key to invalidate.
	 */
	invalidateCacheKey(key) {
		this.cache.delete(key);
		this.stats.invalidations++;
	}

	/**
	 * Get cache statistics
	 * @returns {Object} Cache statistics
	 */
	getStats() {
		return {
			hits: this.stats.hits,
			misses: this.stats.misses,
			invalidations: this.stats.invalidations,
			size: this.cache.size,
			maxSize: this.config.maxCacheSize,
			ttl: this.config.ttl
		};
	}

	/**
	 * Generate a cache key from context ID and metadata
	 * @private
	 * @deprecated No longer used for direct cache key generation outside the manager.
	 * Prefer generating specific keys in calling functions.
	 */
	_getCacheKey(contextId, metadata) {
		// Kept for potential backward compatibility or internal use if needed later.
		return `${contextId}:${JSON.stringify(metadata)}`;
	}
}

// Export a singleton instance with default config
export const contextManager = new ContextManager();
203	mcp-server/src/core/task-master-core.js	Normal file
@@ -0,0 +1,203 @@
/**
 * task-master-core.js
 * Direct function imports from Task Master modules
 *
 * This module provides direct access to Task Master core functions
 * for improved performance and error handling compared to CLI execution.
 */

import path from 'path';
import { fileURLToPath } from 'url';
import { dirname } from 'path';
import fs from 'fs';

// Get the current module's directory
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

// Import Task Master modules
import {
	listTasks
	// We'll import more functions as we continue implementation
} from '../../../scripts/modules/task-manager.js';

// Import context manager
import { contextManager } from './context-manager.js';
import { getCachedOrExecute } from '../tools/utils.js'; // Import the utility here

/**
 * Finds the absolute path to the tasks.json file based on project root and arguments.
 * @param {Object} args - Command arguments, potentially including 'projectRoot' and 'file'.
 * @param {Object} log - Logger object.
 * @returns {string} - Absolute path to the tasks.json file.
 * @throws {Error} - If tasks.json cannot be found.
 */
function findTasksJsonPath(args, log) {
	// Assume projectRoot is already a normalized absolute path if passed in args,
	// or use getProjectRoot if we decide to centralize that logic
	const projectRoot = args.projectRoot || process.cwd();
	log.info(`Searching for tasks.json within project root: ${projectRoot}`);

	const possiblePaths = [];

	// 1. If a file is explicitly provided relative to projectRoot
	if (args.file) {
		possiblePaths.push(path.resolve(projectRoot, args.file));
	}

	// 2. Check the standard locations relative to projectRoot
	possiblePaths.push(
		path.join(projectRoot, 'tasks.json'),
		path.join(projectRoot, 'tasks', 'tasks.json')
	);

	log.info(`Checking potential task file paths: ${possiblePaths.join(', ')}`);

	// Find the first existing path
	for (const p of possiblePaths) {
		if (fs.existsSync(p)) {
			log.info(`Found tasks file at: ${p}`);
			return p;
		}
	}

	// If no file was found, throw an error
	const error = new Error(
		`Tasks file not found in any of the expected locations relative to ${projectRoot}: ${possiblePaths.join(', ')}`
	);
	error.code = 'TASKS_FILE_NOT_FOUND';
	throw error;
}

/**
 * Direct function wrapper for listTasks with error handling and caching.
 *
 * @param {Object} args - Command arguments (projectRoot is expected to be resolved).
 * @param {Object} log - Logger object.
 * @returns {Promise<Object>} - Task list result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }.
 */
export async function listTasksDirect(args, log) {
	let tasksPath;
	try {
		// Find the tasks path first - needed for cache key and execution
		tasksPath = findTasksJsonPath(args, log);
	} catch (error) {
		if (error.code === 'TASKS_FILE_NOT_FOUND') {
			log.error(`Tasks file not found: ${error.message}`);
			// Return the error structure expected by the calling tool/handler
			return {
				success: false,
				error: { code: error.code, message: error.message },
				fromCache: false
			};
		}
		log.error(`Unexpected error finding tasks file: ${error.message}`);
		// Re-throw for outer catch or return structured error
		return {
			success: false,
			error: { code: 'FIND_TASKS_PATH_ERROR', message: error.message },
			fromCache: false
		};
	}

	// Generate cache key *after* finding tasksPath
	const statusFilter = args.status || 'all';
	const withSubtasks = args.withSubtasks || false;
	const cacheKey = `listTasks:${tasksPath}:${statusFilter}:${withSubtasks}`;

	// Define the action function to be executed on cache miss
	const coreListTasksAction = async () => {
		try {
			log.info(
				`Executing core listTasks function for path: ${tasksPath}, filter: ${statusFilter}, subtasks: ${withSubtasks}`
			);
			const resultData = listTasks(
				tasksPath,
				statusFilter,
				withSubtasks,
				'json'
			);

			if (!resultData || !resultData.tasks) {
				log.error('Invalid or empty response from listTasks core function');
				return {
					success: false,
					error: {
						code: 'INVALID_CORE_RESPONSE',
						message: 'Invalid or empty response from listTasks core function'
					}
				};
			}
			log.info(
				`Core listTasks function retrieved ${resultData.tasks.length} tasks`
			);
			return { success: true, data: resultData };
		} catch (error) {
			log.error(`Core listTasks function failed: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'LIST_TASKS_CORE_ERROR',
					message: error.message || 'Failed to list tasks'
				}
			};
		}
	};

	// Use the caching utility
	try {
		const result = await getCachedOrExecute({
			cacheKey,
			actionFn: coreListTasksAction,
			log
		});
		log.info(`listTasksDirect completed. From cache: ${result.fromCache}`);
		return result; // Returns { success, data/error, fromCache }
	} catch (error) {
		// Catch unexpected errors from getCachedOrExecute itself (though unlikely)
		log.error(
			`Unexpected error during getCachedOrExecute for listTasks: ${error.message}`
		);
		console.error(error.stack);
		return {
			success: false,
			error: { code: 'CACHE_UTIL_ERROR', message: error.message },
			fromCache: false
		};
	}
}

/**
 * Get cache statistics for monitoring
 * @param {Object} args - Command arguments
 * @param {Object} log - Logger object
 * @returns {Object} - Cache statistics
 */
export async function getCacheStatsDirect(args, log) {
	try {
		log.info('Retrieving cache statistics');
		const stats = contextManager.getStats();
		return {
			success: true,
			data: stats
		};
	} catch (error) {
		log.error(`Error getting cache stats: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CACHE_STATS_ERROR',
				message: error.message || 'Unknown error occurred'
			}
		};
	}
}

/**
 * Maps Task Master functions to their direct implementation
 */
export const directFunctions = {
	list: listTasksDirect,
	cacheStats: getCacheStatsDirect
	// Add more functions as we implement them
};
@@ -1,10 +1,10 @@
import { FastMCP } from 'fastmcp';
import path from 'path';
import dotenv from 'dotenv';
import { fileURLToPath } from 'url';
import fs from 'fs';
import logger from './logger.js';
import { registerTaskMasterTools } from './tools/index.js';

// Load environment variables
dotenv.config();
@@ -17,70 +17,70 @@ const __dirname = path.dirname(__filename);
 * Main MCP server class that integrates with Task Master
 */
class TaskMasterMCPServer {
	constructor() {
		// Get version from package.json using synchronous fs
		const packagePath = path.join(__dirname, '../../package.json');
		const packageJson = JSON.parse(fs.readFileSync(packagePath, 'utf8'));

		this.options = {
			name: 'Task Master MCP Server',
			version: packageJson.version
		};

		this.server = new FastMCP(this.options);
		this.initialized = false;

		// this.server.addResource({});

		// this.server.addResourceTemplate({});

		// Bind methods
		this.init = this.init.bind(this);
		this.start = this.start.bind(this);
		this.stop = this.stop.bind(this);

		// Setup logging
		this.logger = logger;
	}

	/**
	 * Initialize the MCP server with necessary tools and routes
	 */
	async init() {
		if (this.initialized) return;

		// Register Task Master tools
		registerTaskMasterTools(this.server);

		this.initialized = true;

		return this;
	}

	/**
	 * Start the MCP server
	 */
	async start() {
		if (!this.initialized) {
			await this.init();
		}

		// Start the FastMCP server
		await this.server.start({
			transportType: 'stdio'
		});

		return this;
	}

	/**
	 * Stop the MCP server
	 */
	async stop() {
		if (this.server) {
			await this.server.stop();
		}
	}
}

export default TaskMasterMCPServer;
@@ -1,18 +1,18 @@
import chalk from 'chalk';

// Define log levels
const LOG_LEVELS = {
	debug: 0,
	info: 1,
	warn: 2,
	error: 3,
	success: 4
};

// Get log level from environment or default to info
const LOG_LEVEL = process.env.LOG_LEVEL
	? LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()]
	: LOG_LEVELS.info;

/**
 * Logs a message with the specified level
@@ -20,29 +20,29 @@ const LOG_LEVEL = process.env.LOG_LEVEL
 * @param {...any} args - Arguments to log
 */
function log(level, ...args) {
	const icons = {
		debug: chalk.gray('🔍'),
		info: chalk.blue('ℹ️'),
		warn: chalk.yellow('⚠️'),
		error: chalk.red('❌'),
		success: chalk.green('✅')
	};

	if (LOG_LEVELS[level] >= LOG_LEVEL) {
		const icon = icons[level] || '';

		if (level === 'error') {
			console.error(icon, chalk.red(...args));
		} else if (level === 'warn') {
			console.warn(icon, chalk.yellow(...args));
		} else if (level === 'success') {
			console.log(icon, chalk.green(...args));
		} else if (level === 'info') {
			console.log(icon, chalk.blue(...args));
		} else {
			console.log(icon, ...args);
		}
	}
}

/**
@@ -51,14 +51,14 @@ function log(level, ...args) {
 * @returns {Object} Logger object with info, error, debug, warn, and success methods
 */
export function createLogger() {
	return {
		debug: (message) => log('debug', message),
		info: (message) => log('info', message),
		warn: (message) => log('warn', message),
		error: (message) => log('error', message),
		success: (message) => log('success', message),
		log: log // Also expose the raw log function
	};
}

// Export a default logger instance
1	mcp-server/src/logger.js	Normal file
@@ -0,0 +1 @@
@@ -3,64 +3,64 @@
 * Tool to add a new task using AI
 */

import { z } from 'zod';
import {
	executeTaskMasterCommand,
	createContentResponse,
	createErrorResponse
} from './utils.js';

/**
 * Register the addTask tool with the MCP server
 * @param {FastMCP} server - FastMCP server instance
 */
export function registerAddTaskTool(server) {
	server.addTool({
		name: 'addTask',
		description: 'Add a new task using AI',
		parameters: z.object({
			prompt: z.string().describe('Description of the task to add'),
			dependencies: z
				.string()
				.optional()
				.describe('Comma-separated list of task IDs this task depends on'),
			priority: z
				.string()
				.optional()
				.describe('Task priority (high, medium, low)'),
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log }) => {
			try {
				log.info(`Adding new task: ${args.prompt}`);

				const cmdArgs = [`--prompt="${args.prompt}"`];
				if (args.dependencies)
					cmdArgs.push(`--dependencies=${args.dependencies}`);
				if (args.priority) cmdArgs.push(`--priority=${args.priority}`);
				if (args.file) cmdArgs.push(`--file=${args.file}`);

				const projectRoot = args.projectRoot;

				const result = executeTaskMasterCommand(
					'add-task',
					log,
					cmdArgs,
					projectRoot
				);

				if (!result.success) {
					throw new Error(result.error);
				}

				return createContentResponse(result.stdout);
			} catch (error) {
				log.error(`Error adding task: ${error.message}`);
				return createErrorResponse(`Error adding task: ${error.message}`);
			}
		}
	});
}
@@ -3,76 +3,76 @@
 * Tool to break down a task into detailed subtasks
 */

import { z } from 'zod';
import {
	executeTaskMasterCommand,
	createContentResponse,
	createErrorResponse
} from './utils.js';

/**
 * Register the expandTask tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerExpandTaskTool(server) {
	server.addTool({
		name: 'expandTask',
		description: 'Break down a task into detailed subtasks',
		parameters: z.object({
			id: z.string().describe('Task ID to expand'),
			num: z.number().optional().describe('Number of subtasks to generate'),
			research: z
				.boolean()
				.optional()
				.describe(
					'Enable Perplexity AI for research-backed subtask generation'
				),
			prompt: z
				.string()
				.optional()
				.describe('Additional context to guide subtask generation'),
			force: z
				.boolean()
				.optional()
				.describe(
					'Force regeneration of subtasks for tasks that already have them'
				),
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log }) => {
			try {
				log.info(`Expanding task ${args.id}`);

				const cmdArgs = [`--id=${args.id}`];
				if (args.num) cmdArgs.push(`--num=${args.num}`);
				if (args.research) cmdArgs.push('--research');
				if (args.prompt) cmdArgs.push(`--prompt="${args.prompt}"`);
				if (args.force) cmdArgs.push('--force');
				if (args.file) cmdArgs.push(`--file=${args.file}`);

				const projectRoot = args.projectRoot;

				const result = executeTaskMasterCommand(
					'expand',
					log,
					cmdArgs,
					projectRoot
				);

				if (!result.success) {
					throw new Error(result.error);
				}

				return createContentResponse(result.stdout);
			} catch (error) {
				log.error(`Error expanding task: ${error.message}`);
				return createErrorResponse(`Error expanding task: ${error.message}`);
			}
		}
	});
}
@@ -3,27 +3,27 @@
 * Export all Task Master CLI tools for MCP server
 */

import logger from '../logger.js';
import { registerListTasksTool } from './listTasks.js';
import { registerShowTaskTool } from './showTask.js';
import { registerSetTaskStatusTool } from './setTaskStatus.js';
import { registerExpandTaskTool } from './expandTask.js';
import { registerNextTaskTool } from './nextTask.js';
import { registerAddTaskTool } from './addTask.js';

/**
 * Register all Task Master tools with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerTaskMasterTools(server) {
	registerListTasksTool(server);
	registerShowTaskTool(server);
	registerSetTaskStatusTool(server);
	registerExpandTaskTool(server);
	registerNextTaskTool(server);
	registerAddTaskTool(server);
}

export default {
	registerTaskMasterTools
};
@@ -3,63 +3,50 @@
 * Tool to list all tasks from Task Master
 */

import { z } from 'zod';
import { createErrorResponse, handleApiResult } from './utils.js';
import { listTasksDirect } from '../core/task-master-core.js';

/**
 * Register the listTasks tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerListTasksTool(server) {
	server.addTool({
		name: 'listTasks',
		description: 'List all tasks from Task Master',
		parameters: z.object({
			status: z.string().optional().describe('Filter tasks by status'),
			withSubtasks: z
				.boolean()
				.optional()
				.describe('Include subtasks in the response'),
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log }) => {
			try {
				log.info(`Listing tasks with filters: ${JSON.stringify(args)}`);

				// Call core function - args contains projectRoot which is handled internally
				const result = await listTasksDirect(args, log);

				// Log result and use handleApiResult utility
				log.info(
					`Retrieved ${result.success ? result.data?.tasks?.length || 0 : 0} tasks`
				);
				return handleApiResult(result, log, 'Error listing tasks');
			} catch (error) {
				log.error(`Error listing tasks: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}

// We no longer need the formatTasksResponse function as we're returning raw JSON data
@@ -3,55 +3,55 @@
 * Tool to show the next task to work on based on dependencies and status
 */

import { z } from 'zod';
import {
	executeTaskMasterCommand,
	createContentResponse,
	createErrorResponse
} from './utils.js';

/**
 * Register the nextTask tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerNextTaskTool(server) {
	server.addTool({
		name: 'nextTask',
		description:
			'Show the next task to work on based on dependencies and status',
		parameters: z.object({
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log }) => {
			try {
				log.info(`Finding next task to work on`);

				const cmdArgs = [];
				if (args.file) cmdArgs.push(`--file=${args.file}`);

				const projectRoot = args.projectRoot;

				const result = executeTaskMasterCommand(
					'next',
					log,
					cmdArgs,
					projectRoot
				);

				if (!result.success) {
					throw new Error(result.error);
				}

				return createContentResponse(result.stdout);
			} catch (error) {
				log.error(`Error finding next task: ${error.message}`);
				return createErrorResponse(`Error finding next task: ${error.message}`);
			}
		}
	});
}
@@ -3,62 +3,62 @@
 * Tool to set the status of a task
 */

import { z } from 'zod';
import {
	executeTaskMasterCommand,
	createContentResponse,
	createErrorResponse
} from './utils.js';

/**
 * Register the setTaskStatus tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerSetTaskStatusTool(server) {
	server.addTool({
		name: 'setTaskStatus',
		description: 'Set the status of a task',
		parameters: z.object({
			id: z
				.string()
				.describe('Task ID (can be comma-separated for multiple tasks)'),
			status: z
				.string()
				.describe('New status (todo, in-progress, review, done)'),
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log }) => {
			try {
				log.info(`Setting status of task(s) ${args.id} to: ${args.status}`);

				const cmdArgs = [`--id=${args.id}`, `--status=${args.status}`];
				if (args.file) cmdArgs.push(`--file=${args.file}`);

				const projectRoot = args.projectRoot;

				const result = executeTaskMasterCommand(
					'set-status',
					log,
					cmdArgs,
					projectRoot
				);

				if (!result.success) {
					throw new Error(result.error);
				}

				return createContentResponse(result.stdout);
			} catch (error) {
				log.error(`Error setting task status: ${error.message}`);
				return createErrorResponse(
					`Error setting task status: ${error.message}`
				);
			}
		}
	});
}
@@ -3,55 +3,80 @@
 * Tool to show detailed information about a specific task
 */

import { z } from 'zod';
import {
	executeTaskMasterCommand,
	createErrorResponse,
	handleApiResult
} from './utils.js';

/**
 * Register the showTask tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerShowTaskTool(server) {
	server.addTool({
		name: 'showTask',
		description: 'Show detailed information about a specific task',
		parameters: z.object({
			id: z.string().describe('Task ID to show'),
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log }) => {
			try {
				log.info(`Showing task details for ID: ${args.id}`);

				// Prepare arguments for CLI command
				const cmdArgs = [`--id=${args.id}`];
				if (args.file) cmdArgs.push(`--file=${args.file}`);

				// Execute the command - function now handles project root internally
				const result = executeTaskMasterCommand(
					'show',
					log,
					cmdArgs,
					args.projectRoot // Pass raw project root, function will normalize it
				);

				// Process CLI result into API result format for handleApiResult
				if (result.success) {
					try {
						// Try to parse response as JSON
						const data = JSON.parse(result.stdout);
						// Return equivalent of a successful API call with data
						return handleApiResult(
							{ success: true, data },
							log,
							'Error showing task'
						);
					} catch (e) {
						// If parsing fails, still return success but with raw string data
						return handleApiResult(
							{ success: true, data: result.stdout },
							log,
							'Error showing task',
							// Skip data processing for string data
							null
						);
					}
				} else {
					// Return equivalent of a failed API call
					return handleApiResult(
						{ success: false, error: { message: result.error } },
						log,
						'Error showing task'
					);
				}
			} catch (error) {
				log.error(`Error showing task: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -3,106 +3,402 @@
 * Utility functions for Task Master CLI integration
 */

import { spawnSync } from 'child_process';
import path from 'path';
import { contextManager } from '../core/context-manager.js'; // Import the singleton

/**
 * Get normalized project root path
 * @param {string|undefined} projectRootRaw - Raw project root from arguments
 * @param {Object} log - Logger object
 * @returns {string} - Normalized absolute path to project root
 */
export function getProjectRoot(projectRootRaw, log) {
	// Make sure projectRoot is set
	const rootPath = projectRootRaw || process.cwd();

	// Ensure projectRoot is absolute
	const projectRoot = path.isAbsolute(rootPath)
		? rootPath
		: path.resolve(process.cwd(), rootPath);

	log.info(`Using project root: ${projectRoot}`);
	return projectRoot;
}

/**
 * Handle API result with standardized error handling and response formatting
 * @param {Object} result - Result object from API call with success, data, and error properties
 * @param {Object} log - Logger object
 * @param {string} errorPrefix - Prefix for error messages
 * @param {Function} processFunction - Optional function to process successful result data
 * @returns {Object} - Standardized MCP response object
 */
export function handleApiResult(
	result,
	log,
	errorPrefix = 'API error',
	processFunction = processMCPResponseData
) {
	if (!result.success) {
		const errorMsg = result.error?.message || `Unknown ${errorPrefix}`;
		// Include cache status in error logs
		log.error(`${errorPrefix}: ${errorMsg}. From cache: ${result.fromCache}`); // Keep logging cache status on error
		return createErrorResponse(errorMsg);
	}

	// Process the result data if needed
	const processedData = processFunction
		? processFunction(result.data)
		: result.data;

	// Log success including cache status
	log.info(`Successfully completed operation. From cache: ${result.fromCache}`); // Add success log with cache status

	// Create the response payload including the fromCache flag
	const responsePayload = {
		fromCache: result.fromCache, // Get the flag from the original 'result'
		data: processedData // Nest the processed data under a 'data' key
	};

	// Pass this combined payload to createContentResponse
	return createContentResponse(responsePayload);
}
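For orientation, a small sketch of how `handleApiResult` shapes its output; the `result` object below is a made-up stand-in for what a direct function such as `listTasksDirect` returns:

```js
// Hypothetical input, mirroring a direct-function result:
const result = {
	success: true,
	fromCache: false,
	data: { tasks: [{ id: 1, title: 'Set up repo', status: 'done' }] }
};

// handleApiResult(result, log, 'Error listing tasks') would produce an MCP
// content response whose text is the JSON string of:
// { fromCache: false, data: { tasks: [...] } }
```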
/**
 * Execute a Task Master CLI command using child_process
 * @param {string} command - The command to execute
 * @param {Object} log - The logger object from FastMCP
 * @param {Array} args - Arguments for the command
 * @param {string|undefined} projectRootRaw - Optional raw project root path (will be normalized internally)
 * @returns {Object} - The result of the command execution
 */
export function executeTaskMasterCommand(
	command,
	log,
	args = [],
	projectRootRaw = null
) {
	try {
		// Normalize project root internally using the getProjectRoot utility
		const cwd = getProjectRoot(projectRootRaw, log);

		// Prepare full arguments array
		const fullArgs = [command, ...args];
		log.info(
			`Executing task-master ${command} with args: ${JSON.stringify(
				args
			)} in directory: ${cwd}`
		);

		// Common options for spawn
		const spawnOptions = {
			encoding: 'utf8',
			cwd: cwd
		};

		// Execute the command using the global task-master CLI or local script
		// Try the global CLI first
		let result = spawnSync('task-master', fullArgs, spawnOptions);

		// If global CLI is not available, try fallback to the local script
		if (result.error && result.error.code === 'ENOENT') {
			log.info('Global task-master not found, falling back to local script');
			result = spawnSync('node', ['scripts/dev.js', ...fullArgs], spawnOptions);
		}

		if (result.error) {
			throw new Error(`Command execution error: ${result.error.message}`);
		}

		if (result.status !== 0) {
			// Improve error handling by combining stderr and stdout if stderr is empty
			const errorOutput = result.stderr
				? result.stderr.trim()
				: result.stdout
					? result.stdout.trim()
					: 'Unknown error';
			throw new Error(
				`Command failed with exit code ${result.status}: ${errorOutput}`
			);
		}

		return {
			success: true,
			stdout: result.stdout,
			stderr: result.stderr
		};
	} catch (error) {
		log.error(`Error executing task-master command: ${error.message}`);
		return {
			success: false,
			error: error.message
		};
	}
}
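A hedged usage sketch of the new signature; `list` and `--with-subtasks` are real CLI arguments used by the tools above, while the project path and logger are illustrative:

```js
const log = { info: console.log, error: console.error };

// The fourth argument is the raw project root; the function normalizes it.
const result = executeTaskMasterCommand(
	'list',
	log,
	['--with-subtasks'],
	'/path/to/project' // illustrative
);
if (result.success) {
	console.log(result.stdout);
} else {
	console.error(result.error);
}
```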
/**
 * Checks cache for a result using the provided key. If not found, executes the action function,
 * caches the result upon success, and returns the result.
 *
 * @param {Object} options - Configuration options.
 * @param {string} options.cacheKey - The unique key for caching this operation's result.
 * @param {Function} options.actionFn - The async function to execute if the cache misses.
 *   Should return an object like { success: boolean, data?: any, error?: { code: string, message: string } }.
 * @param {Object} options.log - The logger instance.
 * @returns {Promise<Object>} - An object containing the result, indicating if it was from cache.
 *   Format: { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
 */
export async function getCachedOrExecute({ cacheKey, actionFn, log }) {
	// Check cache first
	const cachedResult = contextManager.getCachedData(cacheKey);

	if (cachedResult !== undefined) {
		log.info(`Cache hit for key: ${cacheKey}`);
		// Return the cached data in the same structure as a fresh result
		return {
			...cachedResult, // Spread the cached result to maintain its structure
			fromCache: true // Just add the fromCache flag
		};
	}

	log.info(`Cache miss for key: ${cacheKey}. Executing action function.`);

	// Execute the action function if cache missed
	const result = await actionFn();

	// If the action was successful, cache the result (but without fromCache flag)
	if (result.success && result.data !== undefined) {
		log.info(`Action successful. Caching result for key: ${cacheKey}`);
		// Cache the entire result structure (minus the fromCache flag)
		const { fromCache, ...resultToCache } = result;
		contextManager.setCachedData(cacheKey, resultToCache);
	} else if (!result.success) {
		log.warn(
			`Action failed for cache key ${cacheKey}. Result not cached. Error: ${result.error?.message}`
		);
	} else {
		log.warn(
			`Action for cache key ${cacheKey} succeeded but returned no data. Result not cached.`
		);
	}

	// Return the fresh result, indicating it wasn't from cache
	return {
		...result,
		fromCache: false
	};
}
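A minimal usage sketch, assuming the function is imported from this module (the import path and cache key scheme are illustrative):

```js
import { getCachedOrExecute } from './mcp-server/src/tools/utils.js'; // path is illustrative

const log = { info: console.log, warn: console.warn, error: console.error };

const result = await getCachedOrExecute({
	cacheKey: 'listTasks:/my/project:all', // illustrative key scheme
	actionFn: async () => ({ success: true, data: { tasks: [] } }),
	log
});
console.log(result.fromCache); // false on the first call, true on a later hit
```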
/**
 * Executes a Task Master tool action with standardized error handling, logging, and response formatting.
 * Integrates caching logic via getCachedOrExecute if a cacheKeyGenerator is provided.
 *
 * @param {Object} options - Options for executing the tool action
 * @param {Function} options.actionFn - The core action function (e.g., listTasksDirect) to execute. Should return {success, data, error}.
 * @param {Object} options.args - Arguments for the action, passed to actionFn and cacheKeyGenerator.
 * @param {Object} options.log - Logger object from FastMCP.
 * @param {string} options.actionName - Name of the action for logging purposes.
 * @param {Function} [options.cacheKeyGenerator] - Optional function to generate a cache key based on args. If provided, caching is enabled.
 * @param {Function} [options.processResult=processMCPResponseData] - Optional function to process the result data before returning.
 * @returns {Promise<Object>} - Standardized response for FastMCP.
 */
export async function executeMCPToolAction({
	actionFn,
	args,
	log,
	actionName,
	cacheKeyGenerator, // Note: We decided not to use this for listTasks for now
	processResult = processMCPResponseData
}) {
	try {
		// Log the action start
		log.info(`${actionName} with args: ${JSON.stringify(args)}`);

		// Normalize project root path - common to almost all tools
		const projectRootRaw = args.projectRoot || process.cwd();
		const projectRoot = path.isAbsolute(projectRootRaw)
			? projectRootRaw
			: path.resolve(process.cwd(), projectRootRaw);

		log.info(`Using project root: ${projectRoot}`);
		const executionArgs = { ...args, projectRoot };

		let result;
		const cacheKey = cacheKeyGenerator
			? cacheKeyGenerator(executionArgs)
			: null;

		if (cacheKey) {
			// Use caching utility
			log.info(`Caching enabled for ${actionName} with key: ${cacheKey}`);
			const cacheWrappedAction = async () => await actionFn(executionArgs, log);
			result = await getCachedOrExecute({
				cacheKey,
				actionFn: cacheWrappedAction,
				log
			});
		} else {
			// Execute directly without caching
			log.info(`Caching disabled for ${actionName}. Executing directly.`);
			// We need to ensure the result from actionFn has a fromCache field.
			// Let's assume actionFn now consistently returns { success, data/error, fromCache }.
			// The current listTasksDirect does this if it calls getCachedOrExecute internally.
			result = await actionFn(executionArgs, log);
			// If the action function itself doesn't determine caching (like our original listTasksDirect refactor attempt),
			// we'd set it here:
			// result.fromCache = false;
		}

		// Handle error case
		if (!result.success) {
			const errorMsg =
				result.error?.message ||
				`Unknown error during ${actionName.toLowerCase()}`;
			// Include fromCache in error logs too, might be useful
			log.error(
				`Error during ${actionName.toLowerCase()}: ${errorMsg}. From cache: ${result.fromCache}`
			);
			return createErrorResponse(errorMsg);
		}

		// Log success
		log.info(
			`Successfully completed ${actionName.toLowerCase()}. From cache: ${result.fromCache}`
		);

		// Process the result data if needed
		const processedData = processResult
			? processResult(result.data)
			: result.data;

		// Create a new object that includes both the processed data and the fromCache flag
		const responsePayload = {
			fromCache: result.fromCache, // Include the flag here
			data: processedData // Embed the actual data under a 'data' key
		};

		// Pass this combined payload to createContentResponse
		return createContentResponse(responsePayload);
	} catch (error) {
		// Handle unexpected errors during the execution wrapper itself
		log.error(
			`Unexpected error during ${actionName.toLowerCase()} execution wrapper: ${error.message}`
		);
		console.error(error.stack); // Log stack for debugging wrapper errors
		return createErrorResponse(
			`Internal server error during ${actionName.toLowerCase()}: ${error.message}`
		);
	}
}
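For orientation, a sketch of how a tool might delegate to this wrapper. The tool name, parameters, and `someDirectFunction` are hypothetical; `server` and `z` are assumed to be in scope as in the tool files above:

```js
// Hypothetical direct function returning the expected result shape:
const someDirectFunction = async (args, log) => ({
	success: true,
	fromCache: false,
	data: { echoed: args }
});

server.addTool({
	name: 'echoArgs', // illustrative tool
	description: 'Echo the arguments back',
	parameters: z.object({ projectRoot: z.string().optional() }),
	execute: async (args, { log }) =>
		executeMCPToolAction({
			actionFn: someDirectFunction,
			args,
			log,
			actionName: 'Echo args'
		})
});
```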
/**
 * Recursively removes specified fields from task objects, whether single or in an array.
 * Handles common data structures returned by task commands.
 * @param {Object|Array} taskOrData - A single task object or a data object containing a 'tasks' array.
 * @param {string[]} fieldsToRemove - An array of field names to remove.
 * @returns {Object|Array} - The processed data with specified fields removed.
 */
export function processMCPResponseData(
	taskOrData,
	fieldsToRemove = ['details', 'testStrategy']
) {
	if (!taskOrData) {
		return taskOrData;
	}

	// Helper function to process a single task object
	const processSingleTask = (task) => {
		if (typeof task !== 'object' || task === null) {
			return task;
		}

		const processedTask = { ...task };

		// Remove specified fields from the task
		fieldsToRemove.forEach((field) => {
			delete processedTask[field];
		});

		// Recursively process subtasks if they exist and are an array
		if (processedTask.subtasks && Array.isArray(processedTask.subtasks)) {
			// Use processArrayOfTasks to handle the subtasks array
			processedTask.subtasks = processArrayOfTasks(processedTask.subtasks);
		}

		return processedTask;
	};

	// Helper function to process an array of tasks
	const processArrayOfTasks = (tasks) => {
		return tasks.map(processSingleTask);
	};

	// Check if the input is a data structure containing a 'tasks' array (like from listTasks)
	if (
		typeof taskOrData === 'object' &&
		taskOrData !== null &&
		Array.isArray(taskOrData.tasks)
	) {
		return {
			...taskOrData, // Keep other potential fields like 'stats', 'filter'
			tasks: processArrayOfTasks(taskOrData.tasks)
		};
	}
	// Check if the input is likely a single task object (add more checks if needed)
	else if (
		typeof taskOrData === 'object' &&
		taskOrData !== null &&
		'id' in taskOrData &&
		'title' in taskOrData
	) {
		return processSingleTask(taskOrData);
	}
	// Check if the input is an array of tasks directly (less common but possible)
	else if (Array.isArray(taskOrData)) {
		return processArrayOfTasks(taskOrData);
	}

	// If it doesn't match known task structures, return it as is
	return taskOrData;
}
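An illustrative before/after, using the default `fieldsToRemove`:

```js
const stripped = processMCPResponseData({
	tasks: [
		{ id: 1, title: 'Build API', details: 'long text', testStrategy: '...' }
	]
});
// stripped => { tasks: [{ id: 1, title: 'Build API' }] }
```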
/**
 * Creates standard content response for tools
 * @param {string|Object} content - Content to include in response
 * @returns {Object} - Content response object in FastMCP format
 */
export function createContentResponse(content) {
	// FastMCP requires text type, so we format objects as JSON strings
	return {
		content: [
			{
				type: 'text',
				text:
					typeof content === 'object'
						? // Format JSON nicely with indentation
							JSON.stringify(content, null, 2)
						: // Keep other content types as-is
							String(content)
			}
		]
	};
}

/**
 * Creates error response for tools
 * @param {string} errorMessage - Error message to include in response
 * @returns {Object} - Error content response object in FastMCP format
 */
export function createErrorResponse(errorMessage) {
	return {
		content: [
			{
				type: 'text',
				text: `Error: ${errorMessage}`
			}
		],
		isError: true
	};
}
output.json (10 lines)
@@ -1,6 +1,6 @@
{
	"key": "value",
	"nested": {
		"prop": true
	}
}
package-lock.json (generated, 15178 lines): diff suppressed because it is too large
package.json (180 lines)
@@ -1,89 +1,95 @@
 {
 	"name": "task-master-ai",
-	"version": "0.9.30",
+	"version": "0.10.1",
 	"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
 	"main": "index.js",
 	"type": "module",
 	"bin": {
 		"task-master": "bin/task-master.js",
 		"task-master-init": "bin/task-master-init.js",
 		"task-master-mcp-server": "mcp-server/server.js"
 	},
 	"scripts": {
 		"test": "node --experimental-vm-modules node_modules/.bin/jest",
+		"test:fails": "node --experimental-vm-modules node_modules/.bin/jest --onlyFailures",
 		"test:watch": "node --experimental-vm-modules node_modules/.bin/jest --watch",
 		"test:coverage": "node --experimental-vm-modules node_modules/.bin/jest --coverage",
 		"prepare-package": "node scripts/prepare-package.js",
 		"prepublishOnly": "npm run prepare-package",
 		"prepare": "chmod +x bin/task-master.js bin/task-master-init.js",
 		"changeset": "changeset",
-		"release": "changeset publish"
+		"release": "changeset publish",
+		"inspector": "CLIENT_PORT=8888 SERVER_PORT=9000 npx @modelcontextprotocol/inspector node mcp-server/server.js",
+		"format-check": "prettier --check .",
+		"format": "prettier --write ."
 	},
 	"keywords": [
 		"claude",
 		"task",
 		"management",
 		"ai",
 		"development",
 		"cursor",
 		"anthropic",
 		"llm",
 		"mcp",
 		"context"
 	],
 	"author": "Eyal Toledano",
-	"license": "MIT",
+	"license": "MIT WITH Commons-Clause",
 	"dependencies": {
 		"@anthropic-ai/sdk": "^0.39.0",
 		"boxen": "^8.0.1",
 		"chalk": "^4.1.2",
 		"cli-table3": "^0.6.5",
 		"commander": "^11.1.0",
 		"cors": "^2.8.5",
 		"dotenv": "^16.3.1",
 		"express": "^4.21.2",
 		"fastmcp": "^1.20.5",
 		"figlet": "^1.8.0",
+		"fuse.js": "^7.0.0",
 		"gradient-string": "^3.0.0",
 		"helmet": "^8.1.0",
 		"jsonwebtoken": "^9.0.2",
+		"lru-cache": "^10.2.0",
 		"openai": "^4.89.0",
-		"ora": "^8.2.0",
-		"fuse.js": "^7.0.0"
+		"ora": "^8.2.0"
 	},
 	"engines": {
 		"node": ">=14.0.0"
 	},
 	"repository": {
 		"type": "git",
 		"url": "git+https://github.com/eyaltoledano/claude-task-master.git"
 	},
 	"homepage": "https://github.com/eyaltoledano/claude-task-master#readme",
 	"bugs": {
 		"url": "https://github.com/eyaltoledano/claude-task-master/issues"
 	},
 	"files": [
 		"scripts/init.js",
 		"scripts/dev.js",
 		"scripts/modules/**",
 		"assets/**",
 		".cursor/**",
 		"README-task-master.md",
 		"index.js",
 		"bin/**",
 		"mcp-server/**"
 	],
 	"overrides": {
 		"node-fetch": "^3.3.2",
 		"whatwg-url": "^11.0.0"
 	},
 	"devDependencies": {
 		"@changesets/changelog-github": "^0.5.1",
 		"@changesets/cli": "^2.28.1",
 		"@types/jest": "^29.5.14",
 		"jest": "^29.7.0",
 		"jest-environment-node": "^29.7.0",
 		"mock-fs": "^5.5.0",
+		"prettier": "3.5.3",
 		"supertest": "^7.1.0"
 	}
 }
@@ -21,9 +21,11 @@ In an AI-driven development process—particularly with tools like [Cursor](http
The script can be configured through environment variables in a `.env` file at the root of the project:

### Required Configuration

- `ANTHROPIC_API_KEY`: Your Anthropic API key for Claude

### Optional Configuration

- `MODEL`: Specify which Claude model to use (default: "claude-3-7-sonnet-20250219")
- `MAX_TOKENS`: Maximum tokens for model responses (default: 4000)
- `TEMPERATURE`: Temperature for model responses (default: 0.7)
@@ -38,9 +40,10 @@ The script can be configured through environment variables in a `.env` file at t
## How It Works

1. **`tasks.json`** (a minimal sketch follows this list):

   - A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
   - The `meta` field can store additional info like the project's name, version, or reference to the PRD.
   - Tasks can have `subtasks` for more detailed implementation steps.
   - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.
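To make the on-disk shape concrete, here is a minimal sketch of a `tasks.json`, written as a JavaScript literal; every value is illustrative rather than taken from a real project:

```js
// Illustrative tasks.json content:
const exampleTasks = {
	meta: { projectName: 'Demo Project', version: '0.1.0' },
	tasks: [
		{
			id: 1,
			title: 'Set up project skeleton',
			description: 'Initialize repo and tooling',
			status: 'done',
			dependencies: [],
			subtasks: [{ id: 1, title: 'Create package.json', status: 'done' }]
		}
	]
};
```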
@@ -94,14 +97,40 @@ node scripts/dev.js update --from=4 --prompt="Refactor tasks from ID 4 onward to
# Update all tasks (default from=1)
node scripts/dev.js update --prompt="Add authentication to all relevant tasks"

# With research-backed updates using Perplexity AI
node scripts/dev.js update --from=4 --prompt="Integrate OAuth 2.0" --research

# Specify a different tasks file
node scripts/dev.js update --file=custom-tasks.json --from=5 --prompt="Change database from MongoDB to PostgreSQL"
```

Notes:

- The `--prompt` parameter is required and should explain the changes or new context
- Only tasks that aren't marked as 'done' will be updated
- Tasks with ID >= the specified `--from` value will be updated
- The `--research` flag uses Perplexity AI for more informed updates when available
## Updating a Single Task

The `update-task` command allows you to update a specific task instead of multiple tasks:

```bash
# Update a specific task with new information
node scripts/dev.js update-task --id=4 --prompt="Use JWT for authentication"

# With research-backed updates using Perplexity AI
node scripts/dev.js update-task --id=4 --prompt="Use JWT for authentication" --research
```

This command:

- Updates only the specified task rather than a range of tasks
- Provides detailed validation with helpful error messages
- Checks for required API keys when using research mode
- Falls back gracefully if Perplexity API is unavailable
- Preserves tasks that are already marked as "done"
- Includes contextual error handling for common issues
## Setting Task Status

@@ -122,6 +151,7 @@ node scripts/dev.js set-status --id=1,2,3 --status=done
```

Notes:

- When marking a parent task as "done", all of its subtasks will automatically be marked as "done" as well
- Common status values are 'done', 'pending', and 'deferred', but any string is accepted
- You can specify multiple task IDs by separating them with commas
@@ -171,6 +201,7 @@ node scripts/dev.js clear-subtasks --all
```

Notes:

- After clearing subtasks, task files are automatically regenerated
- This is useful when you want to regenerate subtasks with a different approach
- Can be combined with the `expand` command to immediately generate new subtasks
@@ -186,6 +217,7 @@ The script integrates with two AI services:
The Perplexity integration uses the OpenAI client to connect to Perplexity's API, which provides enhanced research capabilities for generating more informed subtasks. If the Perplexity API is unavailable or encounters an error, the script will automatically fall back to using Anthropic's Claude.

To use the Perplexity integration (a client sketch follows these steps):

1. Obtain a Perplexity API key
2. Add `PERPLEXITY_API_KEY` to your `.env` file
3. Optionally specify `PERPLEXITY_MODEL` in your `.env` file (default: "sonar-medium-online")
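A minimal sketch of the pattern described above, pointing the `openai` client at Perplexity's OpenAI-compatible endpoint; the base URL is an assumption based on Perplexity's public documentation, not read from this codebase:

```js
import OpenAI from 'openai';

// Assumed endpoint; Perplexity exposes an OpenAI-compatible API.
const perplexity = new OpenAI({
	apiKey: process.env.PERPLEXITY_API_KEY,
	baseURL: 'https://api.perplexity.ai'
});

const completion = await perplexity.chat.completions.create({
	model: process.env.PERPLEXITY_MODEL || 'sonar-medium-online',
	messages: [{ role: 'user', content: 'Summarize best practices for JWT auth.' }]
});
console.log(completion.choices[0].message.content);
```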
@@ -194,6 +226,7 @@ To use the Perplexity integration:
## Logging

The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:

- `debug`: Detailed information, typically useful for troubleshooting
- `info`: Confirmation that things are working as expected (default)
- `warn`: Warning messages that don't prevent execution
@@ -216,17 +249,20 @@ node scripts/dev.js remove-dependency --id=<id> --depends-on=<id>
These commands:

1. **Allow precise dependency management**:

   - Add dependencies between tasks with automatic validation
   - Remove dependencies when they're no longer needed
   - Update task files automatically after changes

2. **Include validation checks**:

   - Prevent circular dependencies (a task depending on itself)
   - Prevent duplicate dependencies
   - Verify that both tasks exist before adding/removing dependencies
   - Check if dependencies exist before attempting to remove them

3. **Provide clear feedback**:

   - Success messages confirm when dependencies are added/removed
   - Error messages explain why operations failed (if applicable)
@@ -251,6 +287,7 @@ node scripts/dev.js validate-dependencies --file=custom-tasks.json
```

This command:

- Scans all tasks and subtasks for non-existent dependencies
- Identifies potential self-dependencies (tasks referencing themselves)
- Reports all found issues without modifying files
@@ -272,6 +309,7 @@ node scripts/dev.js fix-dependencies --file=custom-tasks.json
```

This command:

1. **Validates all dependencies** across tasks and subtasks
2. **Automatically removes**:
   - References to non-existent tasks and subtasks
@@ -309,6 +347,7 @@ node scripts/dev.js analyze-complexity --research
```

Notes:

- The command uses Claude to analyze each task's complexity (or Perplexity with --research flag)
- Tasks are scored on a scale of 1-10
- Each task receives a recommended number of subtasks based on DEFAULT_SUBTASKS configuration
@@ -333,33 +372,35 @@ node scripts/dev.js expand --id=8 --num=5 --prompt="Custom prompt"
```

When a complexity report exists:

- The `expand` command will use the recommended subtask count from the report (unless overridden)
- It will use the tailored expansion prompt from the report (unless a custom prompt is provided)
- When using `--all`, tasks are sorted by complexity score (highest first)
- The `--research` flag is preserved from the complexity analysis to expansion

The output report structure is:

```json
{
	"meta": {
		"generatedAt": "2023-06-15T12:34:56.789Z",
		"tasksAnalyzed": 20,
		"thresholdScore": 5,
		"projectName": "Your Project Name",
		"usedResearch": true
	},
	"complexityAnalysis": [
		{
			"taskId": 8,
			"taskTitle": "Develop Implementation Drift Handling",
			"complexityScore": 9.5,
			"recommendedSubtasks": 6,
			"expansionPrompt": "Create subtasks that handle detecting...",
			"reasoning": "This task requires sophisticated logic...",
			"expansionCommand": "node scripts/dev.js expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
		}
		// More tasks sorted by complexity score (highest first)
	]
}
```
@@ -426,4 +467,102 @@ This command:
- Commands for working with subtasks
- For subtasks, provides a link to view the parent task

This command is particularly useful when you need to examine a specific task in detail before implementing it or when you want to check the status and details of a particular task.

## Enhanced Error Handling

The script now includes improved error handling throughout all commands:

1. **Detailed Validation**:

   - Required parameters (like task IDs and prompts) are validated early
   - File existence is checked with customized errors for common scenarios
   - Parameter type conversion is handled with clear error messages

2. **Contextual Error Messages**:

   - Task not found errors include suggestions to run the list command
   - API key errors include reminders to check environment variables
   - Invalid ID format errors show the expected format

3. **Command-Specific Help Displays**:

   - When validation fails, detailed help for the specific command is shown
   - Help displays include usage examples and parameter descriptions
   - Formatted in clear, color-coded boxes with examples

4. **Helpful Error Recovery**:

   - Detailed troubleshooting steps for common errors
   - Graceful fallbacks for missing optional dependencies
   - Clear instructions for how to fix configuration issues
## Version Checking

The script now automatically checks for updates without slowing down execution:

1. **Background Version Checking**:

   - Non-blocking version checks run in the background while commands execute
   - Actual command execution isn't delayed by version checking
   - Update notifications appear after command completion

2. **Update Notifications**:

   - When a newer version is available, a notification is displayed
   - Notifications include current version, latest version, and update command
   - Formatted in an attention-grabbing box with clear instructions

3. **Implementation Details** (a sketch of the pattern follows this list):

   - Uses semantic versioning to compare current and latest versions
   - Fetches version information from npm registry with a timeout
   - Gracefully handles connection issues without affecting command execution
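A minimal sketch of that pattern, assuming Node 18+ for the global `fetch`; the registry URL is the standard npm endpoint, and the function names are illustrative rather than taken from this codebase:

```js
// Illustrative background version check (not the project's actual implementation).
async function fetchLatestVersion(pkg = 'task-master-ai', timeoutMs = 2000) {
	const controller = new AbortController();
	const timer = setTimeout(() => controller.abort(), timeoutMs);
	try {
		const res = await fetch(`https://registry.npmjs.org/${pkg}/latest`, {
			signal: controller.signal
		});
		const { version } = await res.json();
		return version;
	} catch {
		return null; // Network issues must never break the command
	} finally {
		clearTimeout(timer);
	}
}

function isNewer(latest, current) {
	const a = latest.split('.').map(Number);
	const b = current.split('.').map(Number);
	for (let i = 0; i < 3; i++) {
		if ((a[i] || 0) !== (b[i] || 0)) return (a[i] || 0) > (b[i] || 0);
	}
	return false;
}

// Kick off in the background; report after the command finishes.
const check = fetchLatestVersion();
// ... run the actual command here ...
const latest = await check;
if (latest && isNewer(latest, '0.10.1')) {
	console.log(`Update available: ${latest}. Run: npm i -g task-master-ai`);
}
```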
## Subtask Management

The script now includes enhanced commands for managing subtasks:

### Adding Subtasks

```bash
# Add a subtask to an existing task
node scripts/dev.js add-subtask --parent=5 --title="Implement login UI" --description="Create login form"

# Convert an existing task to a subtask
node scripts/dev.js add-subtask --parent=5 --task-id=8

# Add a subtask with dependencies
node scripts/dev.js add-subtask --parent=5 --title="Authentication middleware" --dependencies=5.1,5.2

# Skip regenerating task files
node scripts/dev.js add-subtask --parent=5 --title="Login API route" --skip-generate
```

Key features:

- Create new subtasks with detailed properties or convert existing tasks
- Define dependencies between subtasks
- Set custom status for new subtasks
- Provides next-step suggestions after creation

### Removing Subtasks

```bash
# Remove a subtask
node scripts/dev.js remove-subtask --id=5.2

# Remove multiple subtasks
node scripts/dev.js remove-subtask --id=5.2,5.3,5.4

# Convert a subtask to a standalone task
node scripts/dev.js remove-subtask --id=5.2 --convert

# Skip regenerating task files
node scripts/dev.js remove-subtask --id=5.2 --skip-generate
```

Key features:

- Remove subtasks individually or in batches
- Optionally convert subtasks to standalone tasks
- Control whether task files are regenerated
- Provides detailed success messages and next steps
@@ -3,17 +3,17 @@
/**
 * dev.js
 * Task Master CLI - AI-driven development task management
 *
 * This is the refactored entry point that uses the modular architecture.
 * It imports functionality from the modules directory and provides a CLI.
 */

// Add at the very beginning of the file
if (process.env.DEBUG === '1') {
	console.error('DEBUG - dev.js received args:', process.argv.slice(2));
}

import { runCLI } from './modules/commands.js';

// Run the CLI with the process arguments
runCLI(process.argv);
scripts/init.js (1471 lines): diff suppressed because it is too large
(three more large file diffs suppressed)
@@ -8,4 +8,4 @@ export * from './utils.js';
export * from './ui.js';
export * from './ai-services.js';
export * from './task-manager.js';
export * from './commands.js';
(large file diff suppressed)
scripts/modules/task-manager.js (lines 3036-3084), new file excerpt (32 lines)
@@ -0,0 +1,32 @@
async function updateSubtaskById(tasksPath, subtaskId, prompt, useResearch = false) {
	let loadingIndicator = null;
	try {
		log('info', `Updating subtask ${subtaskId} with prompt: "${prompt}"`);

		// Validate subtask ID format
		if (!subtaskId || typeof subtaskId !== 'string' || !subtaskId.includes('.')) {
			throw new Error(`Invalid subtask ID format: ${subtaskId}. Subtask ID must be in format "parentId.subtaskId"`);
		}

		// Validate prompt
		if (!prompt || typeof prompt !== 'string' || prompt.trim() === '') {
			throw new Error('Prompt cannot be empty. Please provide context for the subtask update.');
		}

		// Prepare for fallback handling
		let claudeOverloaded = false;

		// Validate tasks file exists
		if (!fs.existsSync(tasksPath)) {
			throw new Error(`Tasks file not found at path: ${tasksPath}`);
		}

		// Read the tasks file
		const data = readJSON(tasksPath);
		// ... rest of the function
	} catch (error) {
		// Handle errors
		console.error(`Error updating subtask: ${error.message}`);
		throw error;
	}
}
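A hedged call sketch for the excerpt above; the file path, subtask ID, and prompt are illustrative:

```js
// Assuming the function above is exported from task-manager.js:
await updateSubtaskById(
	'tasks/tasks.json', // illustrative path
	'5.2', // "parentId.subtaskId" format
	'Switch the login form to use bcrypt for password hashing'
);
```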
(large file diff suppressed)
@@ -9,23 +9,23 @@ import chalk from 'chalk';
|
||||
|
||||
// Configuration and constants
|
||||
const CONFIG = {
|
||||
model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
|
||||
maxTokens: parseInt(process.env.MAX_TOKENS || '4000'),
|
||||
temperature: parseFloat(process.env.TEMPERATURE || '0.7'),
|
||||
debug: process.env.DEBUG === "true",
|
||||
logLevel: process.env.LOG_LEVEL || "info",
|
||||
defaultSubtasks: parseInt(process.env.DEFAULT_SUBTASKS || "3"),
|
||||
defaultPriority: process.env.DEFAULT_PRIORITY || "medium",
|
||||
projectName: process.env.PROJECT_NAME || "Task Master",
|
||||
projectVersion: "1.5.0" // Hardcoded version - ALWAYS use this value, ignore environment variable
|
||||
model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
|
||||
maxTokens: parseInt(process.env.MAX_TOKENS || '4000'),
|
||||
temperature: parseFloat(process.env.TEMPERATURE || '0.7'),
|
||||
debug: process.env.DEBUG === 'true',
|
||||
logLevel: process.env.LOG_LEVEL || 'info',
|
||||
defaultSubtasks: parseInt(process.env.DEFAULT_SUBTASKS || '3'),
|
||||
defaultPriority: process.env.DEFAULT_PRIORITY || 'medium',
|
||||
projectName: process.env.PROJECT_NAME || 'Task Master',
|
||||
projectVersion: '1.5.0' // Hardcoded version - ALWAYS use this value, ignore environment variable
|
||||
};
|
||||
|
||||
// Set up logging based on log level
|
||||
const LOG_LEVELS = {
|
||||
debug: 0,
|
||||
info: 1,
|
||||
warn: 2,
|
||||
error: 3
|
||||
debug: 0,
|
||||
info: 1,
|
||||
warn: 2,
|
||||
error: 3
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -34,18 +34,18 @@ const LOG_LEVELS = {
|
||||
* @param {...any} args - Arguments to log
|
||||
*/
|
||||
function log(level, ...args) {
|
||||
const icons = {
|
||||
debug: chalk.gray('🔍'),
|
||||
info: chalk.blue('ℹ️'),
|
||||
warn: chalk.yellow('⚠️'),
|
||||
error: chalk.red('❌'),
|
||||
success: chalk.green('✅')
|
||||
};
|
||||
|
||||
if (LOG_LEVELS[level] >= LOG_LEVELS[CONFIG.logLevel]) {
|
||||
const icon = icons[level] || '';
|
||||
console.log(`${icon} ${args.join(' ')}`);
|
||||
}
|
||||
const icons = {
|
||||
debug: chalk.gray('🔍'),
|
||||
info: chalk.blue('ℹ️'),
|
||||
warn: chalk.yellow('⚠️'),
|
||||
error: chalk.red('❌'),
|
||||
success: chalk.green('✅')
|
||||
};
|
||||
|
||||
if (LOG_LEVELS[level] >= LOG_LEVELS[CONFIG.logLevel]) {
|
||||
const icon = icons[level] || '';
|
||||
console.log(`${icon} ${args.join(' ')}`);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -54,16 +54,16 @@ function log(level, ...args) {
|
||||
* @returns {Object} Parsed JSON data
|
||||
*/
|
||||
function readJSON(filepath) {
|
||||
try {
|
||||
const rawData = fs.readFileSync(filepath, 'utf8');
|
||||
return JSON.parse(rawData);
|
||||
} catch (error) {
|
||||
log('error', `Error reading JSON file ${filepath}:`, error.message);
|
||||
if (CONFIG.debug) {
|
||||
console.error(error);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
try {
|
||||
const rawData = fs.readFileSync(filepath, 'utf8');
|
||||
return JSON.parse(rawData);
|
||||
} catch (error) {
|
||||
log('error', `Error reading JSON file ${filepath}:`, error.message);
|
||||
if (CONFIG.debug) {
|
||||
console.error(error);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -72,14 +72,14 @@ function readJSON(filepath) {
|
||||
* @param {Object} data - Data to write
|
||||
*/
|
||||
function writeJSON(filepath, data) {
|
||||
try {
|
||||
fs.writeFileSync(filepath, JSON.stringify(data, null, 2));
|
||||
} catch (error) {
|
||||
log('error', `Error writing JSON file ${filepath}:`, error.message);
|
||||
if (CONFIG.debug) {
|
||||
console.error(error);
|
||||
}
|
||||
}
|
||||
try {
|
||||
fs.writeFileSync(filepath, JSON.stringify(data, null, 2));
|
||||
} catch (error) {
|
||||
log('error', `Error writing JSON file ${filepath}:`, error.message);
|
||||
if (CONFIG.debug) {
|
||||
console.error(error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -88,8 +88,8 @@ function writeJSON(filepath, data) {
|
||||
* @returns {string} Sanitized prompt
|
||||
*/
|
||||
function sanitizePrompt(prompt) {
|
||||
// Replace double quotes with escaped double quotes
|
||||
return prompt.replace(/"/g, '\\"');
|
||||
// Replace double quotes with escaped double quotes
|
||||
return prompt.replace(/"/g, '\\"');
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -98,18 +98,20 @@ function sanitizePrompt(prompt) {
|
||||
* @returns {Object|null} The parsed complexity report or null if not found
|
||||
*/
|
||||
function readComplexityReport(customPath = null) {
|
||||
try {
|
||||
const reportPath = customPath || path.join(process.cwd(), 'scripts', 'task-complexity-report.json');
|
||||
if (!fs.existsSync(reportPath)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const reportData = fs.readFileSync(reportPath, 'utf8');
|
||||
return JSON.parse(reportData);
|
||||
} catch (error) {
|
||||
log('warn', `Could not read complexity report: ${error.message}`);
|
||||
return null;
|
||||
}
|
||||
try {
|
||||
const reportPath =
|
||||
customPath ||
|
||||
path.join(process.cwd(), 'scripts', 'task-complexity-report.json');
|
||||
if (!fs.existsSync(reportPath)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const reportData = fs.readFileSync(reportPath, 'utf8');
|
||||
return JSON.parse(reportData);
|
||||
} catch (error) {
|
||||
log('warn', `Could not read complexity report: ${error.message}`);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -119,11 +121,15 @@ function readComplexityReport(customPath = null) {
|
||||
* @returns {Object|null} The task analysis or null if not found
|
||||
*/
|
||||
function findTaskInComplexityReport(report, taskId) {
|
||||
if (!report || !report.complexityAnalysis || !Array.isArray(report.complexityAnalysis)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return report.complexityAnalysis.find(task => task.taskId === taskId);
|
||||
if (
|
||||
!report ||
|
||||
!report.complexityAnalysis ||
|
||||
!Array.isArray(report.complexityAnalysis)
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return report.complexityAnalysis.find((task) => task.taskId === taskId);
|
||||
}
|
||||
|
||||
/**
@@ -133,24 +139,26 @@ function findTaskInComplexityReport(report, taskId) {
 * @returns {boolean} True if the task exists, false otherwise
 */
function taskExists(tasks, taskId) {
	if (!taskId || !tasks || !Array.isArray(tasks)) {
		return false;
	}

	// Handle both regular task IDs and subtask IDs (e.g., "1.2")
	if (typeof taskId === 'string' && taskId.includes('.')) {
		const [parentId, subtaskId] = taskId.split('.').map(id => parseInt(id, 10));
		const parentTask = tasks.find(t => t.id === parentId);

		if (!parentTask || !parentTask.subtasks) {
			return false;
		}

		return parentTask.subtasks.some(st => st.id === subtaskId);
	}

	const id = parseInt(taskId, 10);
	return tasks.some(t => t.id === id);
	if (!taskId || !tasks || !Array.isArray(tasks)) {
		return false;
	}

	// Handle both regular task IDs and subtask IDs (e.g., "1.2")
	if (typeof taskId === 'string' && taskId.includes('.')) {
		const [parentId, subtaskId] = taskId
			.split('.')
			.map((id) => parseInt(id, 10));
		const parentTask = tasks.find((t) => t.id === parentId);

		if (!parentTask || !parentTask.subtasks) {
			return false;
		}

		return parentTask.subtasks.some((st) => st.id === subtaskId);
	}

	const id = parseInt(taskId, 10);
	return tasks.some((t) => t.id === id);
}

/**
@@ -159,15 +167,15 @@ function taskExists(tasks, taskId) {
 * @returns {string} The formatted task ID
 */
function formatTaskId(id) {
	if (typeof id === 'string' && id.includes('.')) {
		return id; // Already formatted as a string with a dot (e.g., "1.2")
	}

	if (typeof id === 'number') {
		return id.toString();
	}

	return id;
}

/**
@@ -177,35 +185,37 @@ function formatTaskId(id) {
 * @returns {Object|null} The task object or null if not found
 */
function findTaskById(tasks, taskId) {
	if (!taskId || !tasks || !Array.isArray(tasks)) {
		return null;
	}

	// Check if it's a subtask ID (e.g., "1.2")
	if (typeof taskId === 'string' && taskId.includes('.')) {
		const [parentId, subtaskId] = taskId.split('.').map(id => parseInt(id, 10));
		const parentTask = tasks.find(t => t.id === parentId);

		if (!parentTask || !parentTask.subtasks) {
			return null;
		}

		const subtask = parentTask.subtasks.find(st => st.id === subtaskId);
		if (subtask) {
			// Add reference to parent task for context
			subtask.parentTask = {
				id: parentTask.id,
				title: parentTask.title,
				status: parentTask.status
			};
			subtask.isSubtask = true;
		}

		return subtask || null;
	}

	const id = parseInt(taskId, 10);
	return tasks.find(t => t.id === id) || null;
	if (!taskId || !tasks || !Array.isArray(tasks)) {
		return null;
	}

	// Check if it's a subtask ID (e.g., "1.2")
	if (typeof taskId === 'string' && taskId.includes('.')) {
		const [parentId, subtaskId] = taskId
			.split('.')
			.map((id) => parseInt(id, 10));
		const parentTask = tasks.find((t) => t.id === parentId);

		if (!parentTask || !parentTask.subtasks) {
			return null;
		}

		const subtask = parentTask.subtasks.find((st) => st.id === subtaskId);
		if (subtask) {
			// Add reference to parent task for context
			subtask.parentTask = {
				id: parentTask.id,
				title: parentTask.title,
				status: parentTask.status
			};
			subtask.isSubtask = true;
		}

		return subtask || null;
	}

	const id = parseInt(taskId, 10);
	return tasks.find((t) => t.id === id) || null;
}

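Both lookup helpers accept a numeric ID or a dotted "parent.subtask" string; a sketch against hypothetical data:

// Sketch: numeric IDs match top-level tasks, dotted strings match subtasks.
import { taskExists, findTaskById } from './scripts/modules/utils.js';

const tasks = [
	{ id: 1, title: 'Parent', status: 'pending', subtasks: [{ id: 2, title: 'Child' }] }
];

taskExists(tasks, 1); // true
taskExists(tasks, '1.2'); // true — parsed as parent 1, subtask 2
findTaskById(tasks, '1.2'); // the subtask, annotated with parentTask and isSubtask
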
/**
@@ -215,11 +225,11 @@ function findTaskById(tasks, taskId) {
 * @returns {string} The truncated text
 */
function truncate(text, maxLength) {
	if (!text || text.length <= maxLength) {
		return text;
	}

	return text.slice(0, maxLength - 3) + '...';
}

/**
@@ -230,39 +240,47 @@ function truncate(text, maxLength) {
 * @param {Set} recursionStack - Set of nodes in current recursion stack
 * @returns {Array} - List of dependency edges that need to be removed to break cycles
 */
function findCycles(subtaskId, dependencyMap, visited = new Set(), recursionStack = new Set(), path = []) {
	// Mark the current node as visited and part of recursion stack
	visited.add(subtaskId);
	recursionStack.add(subtaskId);
	path.push(subtaskId);

	const cyclesToBreak = [];

	// Get all dependencies of the current subtask
	const dependencies = dependencyMap.get(subtaskId) || [];

	// For each dependency
	for (const depId of dependencies) {
		// If not visited, recursively check for cycles
		if (!visited.has(depId)) {
			const cycles = findCycles(depId, dependencyMap, visited, recursionStack, [...path]);
			cyclesToBreak.push(...cycles);
		}
		// If the dependency is in the recursion stack, we found a cycle
		else if (recursionStack.has(depId)) {
			// Find the position of the dependency in the path
			const cycleStartIndex = path.indexOf(depId);
			// The last edge in the cycle is what we want to remove
			const cycleEdges = path.slice(cycleStartIndex);
			// We'll remove the last edge in the cycle (the one that points back)
			cyclesToBreak.push(depId);
		}
	}

	// Remove the node from recursion stack before returning
	recursionStack.delete(subtaskId);

	return cyclesToBreak;
function findCycles(
	subtaskId,
	dependencyMap,
	visited = new Set(),
	recursionStack = new Set(),
	path = []
) {
	// Mark the current node as visited and part of recursion stack
	visited.add(subtaskId);
	recursionStack.add(subtaskId);
	path.push(subtaskId);

	const cyclesToBreak = [];

	// Get all dependencies of the current subtask
	const dependencies = dependencyMap.get(subtaskId) || [];

	// For each dependency
	for (const depId of dependencies) {
		// If not visited, recursively check for cycles
		if (!visited.has(depId)) {
			const cycles = findCycles(depId, dependencyMap, visited, recursionStack, [
				...path
			]);
			cyclesToBreak.push(...cycles);
		}
		// If the dependency is in the recursion stack, we found a cycle
		else if (recursionStack.has(depId)) {
			// Find the position of the dependency in the path
			const cycleStartIndex = path.indexOf(depId);
			// The last edge in the cycle is what we want to remove
			const cycleEdges = path.slice(cycleStartIndex);
			// We'll remove the last edge in the cycle (the one that points back)
			cyclesToBreak.push(depId);
		}
	}

	// Remove the node from recursion stack before returning
	recursionStack.delete(subtaskId);

	return cyclesToBreak;
}

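findCycles is a depth-first search over the dependency map: any dependency found on the current recursion stack closes a cycle, and that dependency is reported as the edge to remove. A sketch with a hypothetical two-node cycle:

// Sketch: A depends on B and B depends on A, so the DFS flags the back edge.
import { findCycles } from './scripts/modules/utils.js';

const dependencyMap = new Map([
	['A', ['B']],
	['B', ['A']]
]);

findCycles('A', dependencyMap); // ['A'] — breaking B's dependency on A removes the cycle
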
/**
@@ -271,23 +289,23 @@ function findCycles(subtaskId, dependencyMap, visited = new Set(), recursionStac
 * @returns {string} The kebab-case version of the string
 */
const toKebabCase = (str) => {
	// Special handling for common acronyms
	const withReplacedAcronyms = str
		.replace(/ID/g, 'Id')
		.replace(/API/g, 'Api')
		.replace(/UI/g, 'Ui')
		.replace(/URL/g, 'Url')
		.replace(/URI/g, 'Uri')
		.replace(/JSON/g, 'Json')
		.replace(/XML/g, 'Xml')
		.replace(/HTML/g, 'Html')
		.replace(/CSS/g, 'Css');

	// Insert hyphens before capital letters and convert to lowercase
	return withReplacedAcronyms
		.replace(/([A-Z])/g, '-$1')
		.toLowerCase()
		.replace(/^-/, ''); // Remove leading hyphen if present
};

/**
@@ -296,46 +314,46 @@ const toKebabCase = (str) => {
 * @returns {Array<{original: string, kebabCase: string}>} - List of flags that should be converted
 */
function detectCamelCaseFlags(args) {
	const camelCaseFlags = [];
	for (const arg of args) {
		if (arg.startsWith('--')) {
			const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =

			// Skip single-word flags - they can't be camelCase
			if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
				continue;
			}

			// Check for camelCase pattern (lowercase followed by uppercase)
			if (/[a-z][A-Z]/.test(flagName)) {
				const kebabVersion = toKebabCase(flagName);
				if (kebabVersion !== flagName) {
					camelCaseFlags.push({
						original: flagName,
						kebabCase: kebabVersion
					});
				}
			}
		}
	}
	return camelCaseFlags;
}

// Export all utility functions and configuration
export {
	CONFIG,
	LOG_LEVELS,
	log,
	readJSON,
	writeJSON,
	sanitizePrompt,
	readComplexityReport,
	findTaskInComplexityReport,
	taskExists,
	formatTaskId,
	findTaskById,
	truncate,
	findCycles,
	toKebabCase,
	detectCamelCaseFlags
};

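The two flag helpers pair up for CLI input validation; a sketch of their behavior:

// Sketch: flag camelCase options so the CLI can suggest kebab-case instead.
import { toKebabCase, detectCamelCaseFlags } from './scripts/modules/utils.js';

toKebabCase('taskId'); // 'task-id'
toKebabCase('parseURL'); // 'parse-url' — the acronym table keeps 'URL' as one word
detectCamelCaseFlags(['--taskId=5', '--file=tasks.json']);
// [{ original: 'taskId', kebabCase: 'task-id' }]
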
@@ -3,7 +3,7 @@
/**
 * This script prepares the package for publication to NPM.
 * It ensures all necessary files are included and properly configured.
 *
 *
 * Additional options:
 * --patch: Increment patch version (default)
 * --minor: Increment minor version
@@ -22,175 +22,189 @@ const __dirname = dirname(__filename);

// Define colors for console output
const COLORS = {
	reset: '\x1b[0m',
	bright: '\x1b[1m',
	dim: '\x1b[2m',
	red: '\x1b[31m',
	green: '\x1b[32m',
	yellow: '\x1b[33m',
	blue: '\x1b[34m',
	magenta: '\x1b[35m',
	cyan: '\x1b[36m'
};

// Parse command line arguments
const args = process.argv.slice(2);
const versionBump = args.includes('--major') ? 'major' :
	args.includes('--minor') ? 'minor' :
	'patch';
const versionBump = args.includes('--major')
	? 'major'
	: args.includes('--minor')
		? 'minor'
		: 'patch';

// Check for explicit version
const versionArg = args.find(arg => arg.startsWith('--version='));
const versionArg = args.find((arg) => arg.startsWith('--version='));
const explicitVersion = versionArg ? versionArg.split('=')[1] : null;

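In effect the flags map as follows (the script path is illustrative; this diff does not show the file name):

// node scripts/prepare-package.js                  → versionBump 'patch', explicitVersion null
// node scripts/prepare-package.js --minor          → versionBump 'minor'
// node scripts/prepare-package.js --version=1.2.3  → explicitVersion '1.2.3', which takes precedence
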
// Log function with color support
function log(level, ...args) {
	const prefix = {
		info: `${COLORS.blue}[INFO]${COLORS.reset}`,
		warn: `${COLORS.yellow}[WARN]${COLORS.reset}`,
		error: `${COLORS.red}[ERROR]${COLORS.reset}`,
		success: `${COLORS.green}[SUCCESS]${COLORS.reset}`
	}[level.toLowerCase()];

	console.log(prefix, ...args);
}

// Function to check if a file exists
function fileExists(filePath) {
	return fs.existsSync(filePath);
}

// Function to ensure a file is executable
function ensureExecutable(filePath) {
	try {
		fs.chmodSync(filePath, '755');
		log('info', `Made ${filePath} executable`);
	} catch (error) {
		log('error', `Failed to make ${filePath} executable:`, error.message);
		return false;
	}
	return true;
}

// Function to sync template files
function syncTemplateFiles() {
	// We no longer need to sync files since we're using them directly
	log('info', 'Template syncing has been deprecated - using source files directly');
	return true;
	// We no longer need to sync files since we're using them directly
	log(
		'info',
		'Template syncing has been deprecated - using source files directly'
	);
	return true;
}

// Function to increment version
function incrementVersion(currentVersion, type = 'patch') {
	const [major, minor, patch] = currentVersion.split('.').map(Number);

	switch (type) {
		case 'major':
			return `${major + 1}.0.0`;
		case 'minor':
			return `${major}.${minor + 1}.0`;
		case 'patch':
		default:
			return `${major}.${minor}.${patch + 1}`;
	}
}

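incrementVersion is a plain semver bump over the parsed version triple; for example, starting from the 0.10.2 base of this compare:

incrementVersion('0.10.2', 'patch'); // '0.10.3'
incrementVersion('0.10.2', 'minor'); // '0.11.0'
incrementVersion('0.10.2', 'major'); // '1.0.0'
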
// Main function to prepare the package
function preparePackage() {
	const rootDir = path.join(__dirname, '..');
	log('info', `Preparing package in ${rootDir}`);

	// Update version in package.json
	const packageJsonPath = path.join(rootDir, 'package.json');
	const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
	const currentVersion = packageJson.version;

	let newVersion;
	if (explicitVersion) {
		newVersion = explicitVersion;
		log('info', `Setting version to specified ${newVersion} (was ${currentVersion})`);
	} else {
		newVersion = incrementVersion(currentVersion, versionBump);
		log('info', `Incrementing ${versionBump} version to ${newVersion} (was ${currentVersion})`);
	}

	packageJson.version = newVersion;
	fs.writeFileSync(packageJsonPath, JSON.stringify(packageJson, null, 2));
	log('success', `Updated package.json version to ${newVersion}`);

	// Check for required files
	const requiredFiles = [
		'package.json',
		'README-task-master.md',
		'index.js',
		'scripts/init.js',
		'scripts/dev.js',
		'assets/env.example',
		'assets/gitignore',
		'assets/example_prd.txt',
		'assets/scripts_README.md',
		'.cursor/rules/dev_workflow.mdc',
		'.cursor/rules/cursor_rules.mdc',
		'.cursor/rules/self_improve.mdc'
	];

	let allFilesExist = true;
	for (const file of requiredFiles) {
		const filePath = path.join(rootDir, file);
		if (!fileExists(filePath)) {
			log('error', `Required file ${file} does not exist`);
			allFilesExist = false;
		}
	}

	if (!allFilesExist) {
		log('error', 'Some required files are missing. Package preparation failed.');
		process.exit(1);
	}

	// Ensure scripts are executable
	const executableScripts = [
		'scripts/init.js',
		'scripts/dev.js'
	];

	let allScriptsExecutable = true;
	for (const script of executableScripts) {
		const scriptPath = path.join(rootDir, script);
		if (!ensureExecutable(scriptPath)) {
			allScriptsExecutable = false;
		}
	}

	if (!allScriptsExecutable) {
		log('warn', 'Some scripts could not be made executable. This may cause issues.');
	}

	// Run npm pack to test package creation
	try {
		log('info', 'Running npm pack to test package creation...');
		const output = execSync('npm pack --dry-run', { cwd: rootDir }).toString();
		log('info', output);
	} catch (error) {
		log('error', 'Failed to run npm pack:', error.message);
		process.exit(1);
	}

	// Make scripts executable
	log('info', 'Making scripts executable...');
	try {
		execSync('chmod +x scripts/init.js', { stdio: 'ignore' });
		log('info', 'Made scripts/init.js executable');
		execSync('chmod +x scripts/dev.js', { stdio: 'ignore' });
		log('info', 'Made scripts/dev.js executable');
	} catch (error) {
		log('error', 'Failed to make scripts executable:', error.message);
	}

	log('success', `Package preparation completed successfully! 🎉`);
	log('success', `Version updated to ${newVersion}`);
	log('info', 'You can now publish the package with:');
	log('info', '  npm publish');
	const rootDir = path.join(__dirname, '..');
	log('info', `Preparing package in ${rootDir}`);

	// Update version in package.json
	const packageJsonPath = path.join(rootDir, 'package.json');
	const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
	const currentVersion = packageJson.version;

	let newVersion;
	if (explicitVersion) {
		newVersion = explicitVersion;
		log(
			'info',
			`Setting version to specified ${newVersion} (was ${currentVersion})`
		);
	} else {
		newVersion = incrementVersion(currentVersion, versionBump);
		log(
			'info',
			`Incrementing ${versionBump} version to ${newVersion} (was ${currentVersion})`
		);
	}

	packageJson.version = newVersion;
	fs.writeFileSync(packageJsonPath, JSON.stringify(packageJson, null, 2));
	log('success', `Updated package.json version to ${newVersion}`);

	// Check for required files
	const requiredFiles = [
		'package.json',
		'README-task-master.md',
		'index.js',
		'scripts/init.js',
		'scripts/dev.js',
		'assets/env.example',
		'assets/gitignore',
		'assets/example_prd.txt',
		'assets/scripts_README.md',
		'.cursor/rules/dev_workflow.mdc',
		'.cursor/rules/cursor_rules.mdc',
		'.cursor/rules/self_improve.mdc'
	];

	let allFilesExist = true;
	for (const file of requiredFiles) {
		const filePath = path.join(rootDir, file);
		if (!fileExists(filePath)) {
			log('error', `Required file ${file} does not exist`);
			allFilesExist = false;
		}
	}

	if (!allFilesExist) {
		log(
			'error',
			'Some required files are missing. Package preparation failed.'
		);
		process.exit(1);
	}

	// Ensure scripts are executable
	const executableScripts = ['scripts/init.js', 'scripts/dev.js'];

	let allScriptsExecutable = true;
	for (const script of executableScripts) {
		const scriptPath = path.join(rootDir, script);
		if (!ensureExecutable(scriptPath)) {
			allScriptsExecutable = false;
		}
	}

	if (!allScriptsExecutable) {
		log(
			'warn',
			'Some scripts could not be made executable. This may cause issues.'
		);
	}

	// Run npm pack to test package creation
	try {
		log('info', 'Running npm pack to test package creation...');
		const output = execSync('npm pack --dry-run', { cwd: rootDir }).toString();
		log('info', output);
	} catch (error) {
		log('error', 'Failed to run npm pack:', error.message);
		process.exit(1);
	}

	// Make scripts executable
	log('info', 'Making scripts executable...');
	try {
		execSync('chmod +x scripts/init.js', { stdio: 'ignore' });
		log('info', 'Made scripts/init.js executable');
		execSync('chmod +x scripts/dev.js', { stdio: 'ignore' });
		log('info', 'Made scripts/dev.js executable');
	} catch (error) {
		log('error', 'Failed to make scripts executable:', error.message);
	}

	log('success', `Package preparation completed successfully! 🎉`);
	log('success', `Version updated to ${newVersion}`);
	log('info', 'You can now publish the package with:');
	log('info', '  npm publish');
}

// Run the preparation
preparePackage();

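End to end, preparePackage bumps the version, verifies the required file list, makes the scripts executable, and dry-runs npm pack before pointing the user at npm publish; a hypothetical run (script path assumed, output abridged from the log calls above):

// node scripts/prepare-package.js --minor
// [INFO] Preparing package in /path/to/repo
// [SUCCESS] Updated package.json version to 0.11.0
// [SUCCESS] Package preparation completed successfully! 🎉
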
@@ -1,203 +1,203 @@
{
  "meta": {
    "generatedAt": "2025-03-24T20:01:35.986Z",
    "tasksAnalyzed": 24,
    "thresholdScore": 5,
    "projectName": "Your Project Name",
    "usedResearch": false
  },
  "complexityAnalysis": [
    {
      "taskId": 1,
      "taskTitle": "Implement Task Data Structure",
      "complexityScore": 7,
      "recommendedSubtasks": 5,
      "expansionPrompt": "Break down the implementation of the core tasks.json data structure into subtasks that cover schema design, model implementation, validation, file operations, and error handling. For each subtask, include specific technical requirements and acceptance criteria.",
      "reasoning": "This task requires designing a foundational data structure that will be used throughout the system. It involves schema design, validation logic, and file system operations, which together represent moderate to high complexity. The task is critical as many other tasks depend on it."
    },
    {
      "taskId": 2,
      "taskTitle": "Develop Command Line Interface Foundation",
      "complexityScore": 6,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Divide the CLI foundation implementation into subtasks covering Commander.js setup, help documentation creation, console output formatting, and global options handling. Each subtask should specify implementation details and how it integrates with the overall CLI structure.",
      "reasoning": "Setting up the CLI foundation requires integrating Commander.js, implementing various command-line options, and establishing the output formatting system. The complexity is moderate as it involves creating the interface layer that users will interact with."
    },
    {
      "taskId": 3,
      "taskTitle": "Implement Basic Task Operations",
      "complexityScore": 8,
      "recommendedSubtasks": 5,
      "expansionPrompt": "Break down the implementation of basic task operations into subtasks covering CRUD operations, status management, dependency handling, and priority management. Each subtask should detail the specific operations, validation requirements, and error cases to handle.",
      "reasoning": "This task encompasses multiple operations (create, read, update, delete) along with status changes, dependency management, and priority handling. It represents high complexity due to the breadth of functionality and the need to ensure data integrity across operations."
    },
    {
      "taskId": 4,
      "taskTitle": "Create Task File Generation System",
      "complexityScore": 7,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Divide the task file generation system into subtasks covering template creation, file generation logic, bi-directional synchronization, and file organization. Each subtask should specify the technical approach, edge cases to handle, and integration points with the task data structure.",
      "reasoning": "Implementing file generation with bi-directional synchronization presents significant complexity due to the need to maintain consistency between individual files and the central tasks.json. The system must handle updates in either direction and resolve potential conflicts."
    },
    {
      "taskId": 5,
      "taskTitle": "Integrate Anthropic Claude API",
      "complexityScore": 6,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Break down the Claude API integration into subtasks covering authentication setup, prompt template creation, response handling, and error management with retries. Each subtask should detail the specific implementation approach, including security considerations and performance optimizations.",
      "reasoning": "Integrating with the Claude API involves setting up authentication, creating effective prompts, and handling responses and errors. The complexity is moderate, focusing on establishing a reliable connection to the external service with proper error handling and retry logic."
    },
    {
      "taskId": 6,
      "taskTitle": "Build PRD Parsing System",
      "complexityScore": 8,
      "recommendedSubtasks": 5,
      "expansionPrompt": "Divide the PRD parsing system into subtasks covering file reading, prompt engineering, content-to-task conversion, dependency inference, priority assignment, and handling large documents. Each subtask should specify the AI interaction approach, data transformation steps, and validation requirements.",
      "reasoning": "Parsing PRDs into structured tasks requires sophisticated prompt engineering and intelligent processing of unstructured text. The complexity is high due to the need to accurately extract tasks, infer dependencies, and handle potentially large documents with varying formats."
    },
    {
      "taskId": 7,
      "taskTitle": "Implement Task Expansion with Claude",
      "complexityScore": 7,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Break down the task expansion functionality into subtasks covering prompt creation for subtask generation, expansion workflow implementation, parent-child relationship management, and regeneration mechanisms. Each subtask should detail the AI interaction patterns, data structures, and user experience considerations.",
      "reasoning": "Task expansion involves complex AI interactions to generate meaningful subtasks and manage their relationships with parent tasks. The complexity comes from creating effective prompts that produce useful subtasks and implementing a smooth workflow for users to generate and refine these subtasks."
    },
    {
      "taskId": 8,
      "taskTitle": "Develop Implementation Drift Handling",
      "complexityScore": 9,
      "recommendedSubtasks": 5,
      "expansionPrompt": "Divide the implementation drift handling into subtasks covering change detection, task rewriting based on new context, dependency chain updates, work preservation, and update suggestion analysis. Each subtask should specify the algorithms, heuristics, and AI prompts needed to effectively manage implementation changes.",
      "reasoning": "This task involves the complex challenge of updating future tasks based on changes in implementation. It requires sophisticated analysis of completed work, understanding how it affects pending tasks, and intelligently updating those tasks while preserving dependencies. This represents high complexity due to the need for context-aware AI reasoning."
    },
    {
      "taskId": 9,
      "taskTitle": "Integrate Perplexity API",
      "complexityScore": 5,
      "recommendedSubtasks": 3,
      "expansionPrompt": "Break down the Perplexity API integration into subtasks covering authentication setup, research-oriented prompt creation, response handling, and fallback mechanisms. Each subtask should detail the implementation approach, integration with existing systems, and quality comparison metrics.",
      "reasoning": "Similar to the Claude integration but slightly less complex, this task focuses on connecting to the Perplexity API for research capabilities. The complexity is moderate, involving API authentication, prompt templates, and response handling with fallback mechanisms to Claude."
    },
    {
      "taskId": 10,
      "taskTitle": "Create Research-Backed Subtask Generation",
      "complexityScore": 7,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Divide the research-backed subtask generation into subtasks covering domain-specific prompt creation, context enrichment from research, knowledge incorporation, and detailed subtask generation. Each subtask should specify the approach for leveraging research data and integrating it into the generation process.",
      "reasoning": "This task builds on previous work to enhance subtask generation with research capabilities. The complexity comes from effectively incorporating research results into the generation process and creating domain-specific prompts that produce high-quality, detailed subtasks with best practices."
    },
    {
      "taskId": 11,
      "taskTitle": "Implement Batch Operations",
      "complexityScore": 6,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Break down the batch operations functionality into subtasks covering multi-task status updates, bulk subtask generation, task filtering/querying, and batch prioritization. Each subtask should detail the command interface, implementation approach, and performance considerations for handling multiple tasks.",
      "reasoning": "Implementing batch operations requires extending existing functionality to work with multiple tasks simultaneously. The complexity is moderate, focusing on efficient processing of task sets, filtering capabilities, and maintaining data consistency across bulk operations."
    },
    {
      "taskId": 12,
      "taskTitle": "Develop Project Initialization System",
      "complexityScore": 6,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Divide the project initialization system into subtasks covering project templating, interactive setup wizard, environment configuration, directory structure creation, and example generation. Each subtask should specify the user interaction flow, template design, and integration with existing components.",
      "reasoning": "Creating a project initialization system involves setting up templates, an interactive wizard, and generating initial files and directories. The complexity is moderate, focusing on providing a smooth setup experience for new projects with appropriate defaults and configuration."
    },
    {
      "taskId": 13,
      "taskTitle": "Create Cursor Rules Implementation",
      "complexityScore": 5,
      "recommendedSubtasks": 3,
      "expansionPrompt": "Break down the Cursor rules implementation into subtasks covering documentation creation (dev_workflow.mdc, cursor_rules.mdc, self_improve.mdc), directory structure setup, and integration documentation. Each subtask should detail the specific content to include and how it enables effective AI interaction.",
      "reasoning": "This task focuses on creating documentation and rules for Cursor AI integration. The complexity is moderate, involving the creation of structured documentation files that define how AI should interact with the system and setting up the appropriate directory structure."
    },
    {
      "taskId": 14,
      "taskTitle": "Develop Agent Workflow Guidelines",
      "complexityScore": 5,
      "recommendedSubtasks": 3,
      "expansionPrompt": "Divide the agent workflow guidelines into subtasks covering task discovery documentation, selection guidelines, implementation guidance, verification procedures, and prioritization rules. Each subtask should specify the specific guidance to provide and how it enables effective agent workflows.",
      "reasoning": "Creating comprehensive guidelines for AI agents involves documenting workflows, selection criteria, and implementation guidance. The complexity is moderate, focusing on clear documentation that helps agents interact effectively with the task system."
    },
    {
      "taskId": 15,
      "taskTitle": "Optimize Agent Integration with Cursor and dev.js Commands",
      "complexityScore": 6,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Break down the agent integration optimization into subtasks covering existing pattern documentation, Cursor-dev.js command integration enhancement, workflow documentation improvement, and feature additions. Each subtask should specify the specific improvements to make and how they enhance agent interaction.",
      "reasoning": "This task involves enhancing and documenting existing agent interaction patterns with Cursor and dev.js commands. The complexity is moderate, focusing on improving integration between different components and ensuring agents can effectively utilize the system's capabilities."
    },
    {
      "taskId": 16,
      "taskTitle": "Create Configuration Management System",
      "complexityScore": 6,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Divide the configuration management system into subtasks covering environment variable handling, .env file support, configuration validation, defaults with overrides, and secure API key handling. Each subtask should specify the implementation approach, security considerations, and user experience for configuration.",
      "reasoning": "Implementing robust configuration management involves handling environment variables, .env files, validation, and secure storage of sensitive information. The complexity is moderate, focusing on creating a flexible system that works across different environments with appropriate security measures."
    },
    {
      "taskId": 17,
      "taskTitle": "Implement Comprehensive Logging System",
      "complexityScore": 5,
      "recommendedSubtasks": 3,
      "expansionPrompt": "Break down the logging system implementation into subtasks covering log level configuration, output destination management, specialized logging (commands, APIs, errors), and performance metrics. Each subtask should detail the implementation approach, configuration options, and integration with existing components.",
      "reasoning": "Creating a comprehensive logging system involves implementing multiple log levels, configurable destinations, and specialized logging for different components. The complexity is moderate, focusing on providing useful information for debugging and monitoring while maintaining performance."
    },
    {
      "taskId": 18,
      "taskTitle": "Create Comprehensive User Documentation",
      "complexityScore": 7,
      "recommendedSubtasks": 5,
      "expansionPrompt": "Divide the user documentation creation into subtasks covering README with installation instructions, command reference, configuration guide, example workflows, troubleshooting guides, and advanced usage. Each subtask should specify the content to include, format, and organization to ensure comprehensive coverage.",
      "reasoning": "Creating comprehensive documentation requires covering installation, usage, configuration, examples, and troubleshooting across multiple components. The complexity is moderate to high due to the breadth of functionality to document and the need to make it accessible to different user levels."
    },
    {
      "taskId": 19,
      "taskTitle": "Implement Error Handling and Recovery",
      "complexityScore": 8,
      "recommendedSubtasks": 5,
      "expansionPrompt": "Break down the error handling implementation into subtasks covering consistent error formatting, helpful error messages, API error handling with retries, file system error recovery, validation errors, and system state recovery. Each subtask should detail the specific error types to handle, recovery strategies, and user communication approach.",
      "reasoning": "Implementing robust error handling across the entire system represents high complexity due to the variety of error types, the need for meaningful messages, and the implementation of recovery mechanisms. This task is critical for system reliability and user experience."
    },
    {
      "taskId": 20,
      "taskTitle": "Create Token Usage Tracking and Cost Management",
      "complexityScore": 7,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Divide the token tracking and cost management into subtasks covering usage tracking implementation, configurable limits, reporting features, cost estimation, caching for optimization, and usage alerts. Each subtask should specify the implementation approach, data storage, and user interface for monitoring and managing usage.",
      "reasoning": "Implementing token usage tracking involves monitoring API calls, calculating costs, implementing limits, and optimizing usage through caching. The complexity is moderate to high, focusing on providing users with visibility into their API consumption and tools to manage costs."
    },
    {
      "taskId": 21,
      "taskTitle": "Refactor dev.js into Modular Components",
      "complexityScore": 8,
      "recommendedSubtasks": 5,
      "expansionPrompt": "Break down the refactoring of dev.js into subtasks covering module design (commands.js, ai-services.js, task-manager.js, ui.js, utils.js), entry point restructuring, dependency management, error handling standardization, and documentation. Each subtask should detail the specific code to extract, interfaces to define, and integration points between modules.",
      "reasoning": "Refactoring a monolithic file into modular components represents high complexity due to the need to identify appropriate boundaries, manage dependencies between modules, and ensure all functionality is preserved. This requires deep understanding of the existing codebase and careful restructuring."
    },
    {
      "taskId": 22,
      "taskTitle": "Create Comprehensive Test Suite for Task Master CLI",
      "complexityScore": 9,
      "recommendedSubtasks": 5,
      "expansionPrompt": "Divide the test suite creation into subtasks covering unit test implementation, integration test development, end-to-end test creation, mocking setup, and CI integration. Each subtask should specify the testing approach, coverage goals, test data preparation, and specific functionality to test.",
      "reasoning": "Developing a comprehensive test suite represents high complexity due to the need to cover unit, integration, and end-to-end tests across all functionality, implement appropriate mocking, and ensure good test coverage. This requires significant test engineering and understanding of the entire system."
    },
    {
      "taskId": 23,
      "taskTitle": "Implement MCP (Model Context Protocol) Server Functionality for Task Master",
      "complexityScore": 9,
      "recommendedSubtasks": 5,
      "expansionPrompt": "Break down the MCP server implementation into subtasks covering core server module creation, endpoint implementation (/context, /models, /execute), context management system, authentication mechanisms, and performance optimization. Each subtask should detail the API design, data structures, and integration with existing Task Master functionality.",
      "reasoning": "Implementing an MCP server represents high complexity due to the need to create a RESTful API with multiple endpoints, manage context data efficiently, handle authentication, and ensure compatibility with the MCP specification. This requires significant API design and server-side development work."
    },
    {
      "taskId": 24,
      "taskTitle": "Implement AI-Powered Test Generation Command",
      "complexityScore": 7,
      "recommendedSubtasks": 4,
      "expansionPrompt": "Divide the test generation command implementation into subtasks covering command structure and parameter handling, task analysis logic, AI prompt construction, and test file generation. Each subtask should specify the implementation approach, AI interaction pattern, and output formatting requirements.",
      "reasoning": "Creating an AI-powered test generation command involves analyzing tasks, constructing effective prompts, and generating well-formatted test files. The complexity is moderate to high, focusing on leveraging AI to produce useful tests based on task descriptions and subtasks."
    }
  ]
}
"meta": {
|
||||
"generatedAt": "2025-03-24T20:01:35.986Z",
|
||||
"tasksAnalyzed": 24,
|
||||
"thresholdScore": 5,
|
||||
"projectName": "Your Project Name",
|
||||
"usedResearch": false
|
||||
},
|
||||
"complexityAnalysis": [
|
||||
{
|
||||
"taskId": 1,
|
||||
"taskTitle": "Implement Task Data Structure",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Break down the implementation of the core tasks.json data structure into subtasks that cover schema design, model implementation, validation, file operations, and error handling. For each subtask, include specific technical requirements and acceptance criteria.",
|
||||
"reasoning": "This task requires designing a foundational data structure that will be used throughout the system. It involves schema design, validation logic, and file system operations, which together represent moderate to high complexity. The task is critical as many other tasks depend on it."
|
||||
},
|
||||
{
|
||||
"taskId": 2,
|
||||
"taskTitle": "Develop Command Line Interface Foundation",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Divide the CLI foundation implementation into subtasks covering Commander.js setup, help documentation creation, console output formatting, and global options handling. Each subtask should specify implementation details and how it integrates with the overall CLI structure.",
|
||||
"reasoning": "Setting up the CLI foundation requires integrating Commander.js, implementing various command-line options, and establishing the output formatting system. The complexity is moderate as it involves creating the interface layer that users will interact with."
|
||||
},
|
||||
{
|
||||
"taskId": 3,
|
||||
"taskTitle": "Implement Basic Task Operations",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Break down the implementation of basic task operations into subtasks covering CRUD operations, status management, dependency handling, and priority management. Each subtask should detail the specific operations, validation requirements, and error cases to handle.",
|
||||
"reasoning": "This task encompasses multiple operations (create, read, update, delete) along with status changes, dependency management, and priority handling. It represents high complexity due to the breadth of functionality and the need to ensure data integrity across operations."
|
||||
},
|
||||
{
|
||||
"taskId": 4,
|
||||
"taskTitle": "Create Task File Generation System",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Divide the task file generation system into subtasks covering template creation, file generation logic, bi-directional synchronization, and file organization. Each subtask should specify the technical approach, edge cases to handle, and integration points with the task data structure.",
|
||||
"reasoning": "Implementing file generation with bi-directional synchronization presents significant complexity due to the need to maintain consistency between individual files and the central tasks.json. The system must handle updates in either direction and resolve potential conflicts."
|
||||
},
|
||||
{
|
||||
"taskId": 5,
|
||||
"taskTitle": "Integrate Anthropic Claude API",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Break down the Claude API integration into subtasks covering authentication setup, prompt template creation, response handling, and error management with retries. Each subtask should detail the specific implementation approach, including security considerations and performance optimizations.",
|
||||
"reasoning": "Integrating with the Claude API involves setting up authentication, creating effective prompts, and handling responses and errors. The complexity is moderate, focusing on establishing a reliable connection to the external service with proper error handling and retry logic."
|
||||
},
|
||||
{
|
||||
"taskId": 6,
|
||||
"taskTitle": "Build PRD Parsing System",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Divide the PRD parsing system into subtasks covering file reading, prompt engineering, content-to-task conversion, dependency inference, priority assignment, and handling large documents. Each subtask should specify the AI interaction approach, data transformation steps, and validation requirements.",
|
||||
"reasoning": "Parsing PRDs into structured tasks requires sophisticated prompt engineering and intelligent processing of unstructured text. The complexity is high due to the need to accurately extract tasks, infer dependencies, and handle potentially large documents with varying formats."
|
||||
},
|
||||
{
|
||||
"taskId": 7,
|
||||
"taskTitle": "Implement Task Expansion with Claude",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Break down the task expansion functionality into subtasks covering prompt creation for subtask generation, expansion workflow implementation, parent-child relationship management, and regeneration mechanisms. Each subtask should detail the AI interaction patterns, data structures, and user experience considerations.",
|
||||
"reasoning": "Task expansion involves complex AI interactions to generate meaningful subtasks and manage their relationships with parent tasks. The complexity comes from creating effective prompts that produce useful subtasks and implementing a smooth workflow for users to generate and refine these subtasks."
|
||||
},
|
||||
{
|
||||
"taskId": 8,
|
||||
"taskTitle": "Develop Implementation Drift Handling",
|
||||
"complexityScore": 9,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Divide the implementation drift handling into subtasks covering change detection, task rewriting based on new context, dependency chain updates, work preservation, and update suggestion analysis. Each subtask should specify the algorithms, heuristics, and AI prompts needed to effectively manage implementation changes.",
|
||||
"reasoning": "This task involves the complex challenge of updating future tasks based on changes in implementation. It requires sophisticated analysis of completed work, understanding how it affects pending tasks, and intelligently updating those tasks while preserving dependencies. This represents high complexity due to the need for context-aware AI reasoning."
|
||||
},
|
||||
{
|
||||
"taskId": 9,
|
||||
"taskTitle": "Integrate Perplexity API",
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 3,
|
||||
"expansionPrompt": "Break down the Perplexity API integration into subtasks covering authentication setup, research-oriented prompt creation, response handling, and fallback mechanisms. Each subtask should detail the implementation approach, integration with existing systems, and quality comparison metrics.",
|
||||
"reasoning": "Similar to the Claude integration but slightly less complex, this task focuses on connecting to the Perplexity API for research capabilities. The complexity is moderate, involving API authentication, prompt templates, and response handling with fallback mechanisms to Claude."
|
||||
},
|
||||
{
|
||||
"taskId": 10,
|
||||
"taskTitle": "Create Research-Backed Subtask Generation",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Divide the research-backed subtask generation into subtasks covering domain-specific prompt creation, context enrichment from research, knowledge incorporation, and detailed subtask generation. Each subtask should specify the approach for leveraging research data and integrating it into the generation process.",
|
||||
"reasoning": "This task builds on previous work to enhance subtask generation with research capabilities. The complexity comes from effectively incorporating research results into the generation process and creating domain-specific prompts that produce high-quality, detailed subtasks with best practices."
|
||||
},
|
||||
{
|
||||
"taskId": 11,
|
||||
"taskTitle": "Implement Batch Operations",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Break down the batch operations functionality into subtasks covering multi-task status updates, bulk subtask generation, task filtering/querying, and batch prioritization. Each subtask should detail the command interface, implementation approach, and performance considerations for handling multiple tasks.",
|
||||
"reasoning": "Implementing batch operations requires extending existing functionality to work with multiple tasks simultaneously. The complexity is moderate, focusing on efficient processing of task sets, filtering capabilities, and maintaining data consistency across bulk operations."
|
||||
},
|
||||
{
|
||||
"taskId": 12,
|
||||
"taskTitle": "Develop Project Initialization System",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Divide the project initialization system into subtasks covering project templating, interactive setup wizard, environment configuration, directory structure creation, and example generation. Each subtask should specify the user interaction flow, template design, and integration with existing components.",
|
||||
"reasoning": "Creating a project initialization system involves setting up templates, an interactive wizard, and generating initial files and directories. The complexity is moderate, focusing on providing a smooth setup experience for new projects with appropriate defaults and configuration."
|
||||
},
|
||||
{
|
||||
"taskId": 13,
|
||||
"taskTitle": "Create Cursor Rules Implementation",
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 3,
|
||||
"expansionPrompt": "Break down the Cursor rules implementation into subtasks covering documentation creation (dev_workflow.mdc, cursor_rules.mdc, self_improve.mdc), directory structure setup, and integration documentation. Each subtask should detail the specific content to include and how it enables effective AI interaction.",
|
||||
"reasoning": "This task focuses on creating documentation and rules for Cursor AI integration. The complexity is moderate, involving the creation of structured documentation files that define how AI should interact with the system and setting up the appropriate directory structure."
|
||||
},
|
||||
{
|
||||
"taskId": 14,
|
||||
"taskTitle": "Develop Agent Workflow Guidelines",
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 3,
|
||||
"expansionPrompt": "Divide the agent workflow guidelines into subtasks covering task discovery documentation, selection guidelines, implementation guidance, verification procedures, and prioritization rules. Each subtask should specify the specific guidance to provide and how it enables effective agent workflows.",
|
||||
"reasoning": "Creating comprehensive guidelines for AI agents involves documenting workflows, selection criteria, and implementation guidance. The complexity is moderate, focusing on clear documentation that helps agents interact effectively with the task system."
|
||||
},
|
||||
{
|
||||
"taskId": 15,
|
||||
"taskTitle": "Optimize Agent Integration with Cursor and dev.js Commands",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Break down the agent integration optimization into subtasks covering existing pattern documentation, Cursor-dev.js command integration enhancement, workflow documentation improvement, and feature additions. Each subtask should specify the specific improvements to make and how they enhance agent interaction.",
|
||||
"reasoning": "This task involves enhancing and documenting existing agent interaction patterns with Cursor and dev.js commands. The complexity is moderate, focusing on improving integration between different components and ensuring agents can effectively utilize the system's capabilities."
|
||||
},
|
||||
{
|
||||
"taskId": 16,
|
||||
"taskTitle": "Create Configuration Management System",
|
||||
"complexityScore": 6,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Divide the configuration management system into subtasks covering environment variable handling, .env file support, configuration validation, defaults with overrides, and secure API key handling. Each subtask should specify the implementation approach, security considerations, and user experience for configuration.",
|
||||
"reasoning": "Implementing robust configuration management involves handling environment variables, .env files, validation, and secure storage of sensitive information. The complexity is moderate, focusing on creating a flexible system that works across different environments with appropriate security measures."
|
||||
},
|
||||
{
|
||||
"taskId": 17,
|
||||
"taskTitle": "Implement Comprehensive Logging System",
|
||||
"complexityScore": 5,
|
||||
"recommendedSubtasks": 3,
|
||||
"expansionPrompt": "Break down the logging system implementation into subtasks covering log level configuration, output destination management, specialized logging (commands, APIs, errors), and performance metrics. Each subtask should detail the implementation approach, configuration options, and integration with existing components.",
|
||||
"reasoning": "Creating a comprehensive logging system involves implementing multiple log levels, configurable destinations, and specialized logging for different components. The complexity is moderate, focusing on providing useful information for debugging and monitoring while maintaining performance."
|
||||
},
|
||||
{
|
||||
"taskId": 18,
|
||||
"taskTitle": "Create Comprehensive User Documentation",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Divide the user documentation creation into subtasks covering README with installation instructions, command reference, configuration guide, example workflows, troubleshooting guides, and advanced usage. Each subtask should specify the content to include, format, and organization to ensure comprehensive coverage.",
|
||||
"reasoning": "Creating comprehensive documentation requires covering installation, usage, configuration, examples, and troubleshooting across multiple components. The complexity is moderate to high due to the breadth of functionality to document and the need to make it accessible to different user levels."
|
||||
},
|
||||
{
|
||||
"taskId": 19,
|
||||
"taskTitle": "Implement Error Handling and Recovery",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Break down the error handling implementation into subtasks covering consistent error formatting, helpful error messages, API error handling with retries, file system error recovery, validation errors, and system state recovery. Each subtask should detail the specific error types to handle, recovery strategies, and user communication approach.",
|
||||
"reasoning": "Implementing robust error handling across the entire system represents high complexity due to the variety of error types, the need for meaningful messages, and the implementation of recovery mechanisms. This task is critical for system reliability and user experience."
|
||||
},
|
||||
{
|
||||
"taskId": 20,
|
||||
"taskTitle": "Create Token Usage Tracking and Cost Management",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Divide the token tracking and cost management into subtasks covering usage tracking implementation, configurable limits, reporting features, cost estimation, caching for optimization, and usage alerts. Each subtask should specify the implementation approach, data storage, and user interface for monitoring and managing usage.",
|
||||
"reasoning": "Implementing token usage tracking involves monitoring API calls, calculating costs, implementing limits, and optimizing usage through caching. The complexity is moderate to high, focusing on providing users with visibility into their API consumption and tools to manage costs."
|
||||
},
|
||||
{
|
||||
"taskId": 21,
|
||||
"taskTitle": "Refactor dev.js into Modular Components",
|
||||
"complexityScore": 8,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Break down the refactoring of dev.js into subtasks covering module design (commands.js, ai-services.js, task-manager.js, ui.js, utils.js), entry point restructuring, dependency management, error handling standardization, and documentation. Each subtask should detail the specific code to extract, interfaces to define, and integration points between modules.",
|
||||
"reasoning": "Refactoring a monolithic file into modular components represents high complexity due to the need to identify appropriate boundaries, manage dependencies between modules, and ensure all functionality is preserved. This requires deep understanding of the existing codebase and careful restructuring."
|
||||
},
|
||||
{
|
||||
"taskId": 22,
|
||||
"taskTitle": "Create Comprehensive Test Suite for Task Master CLI",
|
||||
"complexityScore": 9,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Divide the test suite creation into subtasks covering unit test implementation, integration test development, end-to-end test creation, mocking setup, and CI integration. Each subtask should specify the testing approach, coverage goals, test data preparation, and specific functionality to test.",
|
||||
"reasoning": "Developing a comprehensive test suite represents high complexity due to the need to cover unit, integration, and end-to-end tests across all functionality, implement appropriate mocking, and ensure good test coverage. This requires significant test engineering and understanding of the entire system."
|
||||
},
|
||||
{
|
||||
"taskId": 23,
|
||||
"taskTitle": "Implement MCP (Model Context Protocol) Server Functionality for Task Master",
|
||||
"complexityScore": 9,
|
||||
"recommendedSubtasks": 5,
|
||||
"expansionPrompt": "Break down the MCP server implementation into subtasks covering core server module creation, endpoint implementation (/context, /models, /execute), context management system, authentication mechanisms, and performance optimization. Each subtask should detail the API design, data structures, and integration with existing Task Master functionality.",
|
||||
"reasoning": "Implementing an MCP server represents high complexity due to the need to create a RESTful API with multiple endpoints, manage context data efficiently, handle authentication, and ensure compatibility with the MCP specification. This requires significant API design and server-side development work."
|
||||
},
|
||||
{
|
||||
"taskId": 24,
|
||||
"taskTitle": "Implement AI-Powered Test Generation Command",
|
||||
"complexityScore": 7,
|
||||
"recommendedSubtasks": 4,
|
||||
"expansionPrompt": "Divide the test generation command implementation into subtasks covering command structure and parameter handling, task analysis logic, AI prompt construction, and test file generation. Each subtask should specify the implementation approach, AI interaction pattern, and output formatting requirements.",
|
||||
"reasoning": "Creating an AI-powered test generation command involves analyzing tasks, constructing effective prompts, and generating well-formatted test files. The complexity is moderate to high, focusing on leveraging AI to produce useful tests based on task descriptions and subtasks."
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@

/**
* test-claude-errors.js
*
*
* A test script to verify the error handling and retry logic in the callClaude function.
* This script creates a modified version of dev.js that simulates different error scenarios.
*/
@@ -22,7 +22,7 @@ dotenv.config();

// Create a simple PRD for testing
const createTestPRD = () => {
return `# Test PRD for Error Handling
return `# Test PRD for Error Handling

## Overview
This is a simple test PRD to verify the error handling in the callClaude function.
@@ -36,21 +36,22 @@ This is a simple test PRD to verify the error handling in the callClaude functio

// Create a modified version of dev.js that simulates errors
function createErrorSimulationScript(errorType, failureCount = 2) {
// Read the original dev.js file
const devJsPath = path.join(__dirname, 'dev.js');
const devJsContent = fs.readFileSync(devJsPath, 'utf8');

// Create a modified version that simulates errors
let modifiedContent = devJsContent;

// Find the anthropic.messages.create call and replace it with our mock
const anthropicCallRegex = /const response = await anthropic\.messages\.create\(/;

let mockCode = '';

switch (errorType) {
case 'network':
mockCode = `
// Read the original dev.js file
const devJsPath = path.join(__dirname, 'dev.js');
const devJsContent = fs.readFileSync(devJsPath, 'utf8');

// Create a modified version that simulates errors
let modifiedContent = devJsContent;

// Find the anthropic.messages.create call and replace it with our mock
const anthropicCallRegex =
/const response = await anthropic\.messages\.create\(/;

let mockCode = '';

switch (errorType) {
case 'network':
mockCode = `
// Mock for network error simulation
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -65,10 +66,10 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}

const response = await anthropic.messages.create(`;
break;

case 'timeout':
mockCode = `
break;

case 'timeout':
mockCode = `
// Mock for timeout error simulation
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -83,10 +84,10 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}

const response = await anthropic.messages.create(`;
break;

case 'invalid-json':
mockCode = `
break;

case 'invalid-json':
mockCode = `
// Mock for invalid JSON response
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -107,10 +108,10 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}

const response = await anthropic.messages.create(`;
break;

case 'empty-tasks':
mockCode = `
break;

case 'empty-tasks':
mockCode = `
// Mock for empty tasks array
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -131,82 +132,87 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}

const response = await anthropic.messages.create(`;
break;

default:
// No modification
mockCode = `const response = await anthropic.messages.create(`;
}

// Replace the anthropic call with our mock
modifiedContent = modifiedContent.replace(anthropicCallRegex, mockCode);

// Write the modified script to a temporary file
const tempScriptPath = path.join(__dirname, `temp-dev-${errorType}.js`);
fs.writeFileSync(tempScriptPath, modifiedContent, 'utf8');

return tempScriptPath;
break;

default:
// No modification
mockCode = `const response = await anthropic.messages.create(`;
}

// Replace the anthropic call with our mock
modifiedContent = modifiedContent.replace(anthropicCallRegex, mockCode);

// Write the modified script to a temporary file
const tempScriptPath = path.join(__dirname, `temp-dev-${errorType}.js`);
fs.writeFileSync(tempScriptPath, modifiedContent, 'utf8');

return tempScriptPath;
}

// Function to run a test with a specific error type
async function runErrorTest(errorType, numTasks = 5, failureCount = 2) {
console.log(`\n=== Test: ${errorType.toUpperCase()} Error Simulation ===`);

// Create a test PRD
const testPRD = createTestPRD();
const testPRDPath = path.join(__dirname, `test-prd-${errorType}.txt`);
fs.writeFileSync(testPRDPath, testPRD, 'utf8');

// Create a modified dev.js that simulates the specified error
const tempScriptPath = createErrorSimulationScript(errorType, failureCount);

console.log(`Created test PRD at ${testPRDPath}`);
console.log(`Created error simulation script at ${tempScriptPath}`);
console.log(`Running with error type: ${errorType}, failure count: ${failureCount}, tasks: ${numTasks}`);

try {
// Run the modified script
execSync(`node ${tempScriptPath} parse-prd --input=${testPRDPath} --tasks=${numTasks}`, {
stdio: 'inherit'
});
console.log(`${errorType} error test completed successfully`);
} catch (error) {
console.error(`${errorType} error test failed:`, error.message);
} finally {
// Clean up temporary files
if (fs.existsSync(tempScriptPath)) {
fs.unlinkSync(tempScriptPath);
}
if (fs.existsSync(testPRDPath)) {
fs.unlinkSync(testPRDPath);
}
}
console.log(`\n=== Test: ${errorType.toUpperCase()} Error Simulation ===`);

// Create a test PRD
const testPRD = createTestPRD();
const testPRDPath = path.join(__dirname, `test-prd-${errorType}.txt`);
fs.writeFileSync(testPRDPath, testPRD, 'utf8');

// Create a modified dev.js that simulates the specified error
const tempScriptPath = createErrorSimulationScript(errorType, failureCount);

console.log(`Created test PRD at ${testPRDPath}`);
console.log(`Created error simulation script at ${tempScriptPath}`);
console.log(
`Running with error type: ${errorType}, failure count: ${failureCount}, tasks: ${numTasks}`
);

try {
// Run the modified script
execSync(
`node ${tempScriptPath} parse-prd --input=${testPRDPath} --tasks=${numTasks}`,
{
stdio: 'inherit'
}
);
console.log(`${errorType} error test completed successfully`);
} catch (error) {
console.error(`${errorType} error test failed:`, error.message);
} finally {
// Clean up temporary files
if (fs.existsSync(tempScriptPath)) {
fs.unlinkSync(tempScriptPath);
}
if (fs.existsSync(testPRDPath)) {
fs.unlinkSync(testPRDPath);
}
}
}

// Function to run all error tests
async function runAllErrorTests() {
console.log('Starting error handling tests for callClaude function...');

// Test 1: Network error with automatic retry
await runErrorTest('network', 5, 2);

// Test 2: Timeout error with automatic retry
await runErrorTest('timeout', 5, 2);

// Test 3: Invalid JSON response with task reduction
await runErrorTest('invalid-json', 10, 2);

// Test 4: Empty tasks array with task reduction
await runErrorTest('empty-tasks', 15, 2);

// Test 5: Exhausted retries (more failures than MAX_RETRIES)
await runErrorTest('network', 5, 4);

console.log('\nAll error tests completed!');
console.log('Starting error handling tests for callClaude function...');

// Test 1: Network error with automatic retry
await runErrorTest('network', 5, 2);

// Test 2: Timeout error with automatic retry
await runErrorTest('timeout', 5, 2);

// Test 3: Invalid JSON response with task reduction
await runErrorTest('invalid-json', 10, 2);

// Test 4: Empty tasks array with task reduction
await runErrorTest('empty-tasks', 15, 2);

// Test 5: Exhausted retries (more failures than MAX_RETRIES)
await runErrorTest('network', 5, 4);

console.log('\nAll error tests completed!');
}

// Run the tests
runAllErrorTests().catch(error => {
console.error('Error running tests:', error);
process.exit(1);
});
runAllErrorTests().catch((error) => {
console.error('Error running tests:', error);
process.exit(1);
});

@@ -2,7 +2,7 @@

/**
* test-claude.js
*
*
* A simple test script to verify the improvements to the callClaude function.
* This script tests different scenarios:
* 1. Normal operation with a small PRD
@@ -24,11 +24,11 @@ dotenv.config();

// Create a simple PRD for testing
const createTestPRD = (size = 'small', taskComplexity = 'simple') => {
let content = `# Test PRD - ${size.toUpperCase()} SIZE, ${taskComplexity.toUpperCase()} COMPLEXITY\n\n`;

// Add more content based on size
if (size === 'small') {
content += `
let content = `# Test PRD - ${size.toUpperCase()} SIZE, ${taskComplexity.toUpperCase()} COMPLEXITY\n\n`;

// Add more content based on size
if (size === 'small') {
content += `
## Overview
This is a small test PRD to verify the callClaude function improvements.

@@ -44,9 +44,9 @@ This is a small test PRD to verify the callClaude function improvements.
- Backend: Node.js
- Database: MongoDB
`;
} else if (size === 'medium') {
// Medium-sized PRD with more requirements
content += `
} else if (size === 'medium') {
// Medium-sized PRD with more requirements
content += `
## Overview
This is a medium-sized test PRD to verify the callClaude function improvements.

@@ -76,20 +76,20 @@ This is a medium-sized test PRD to verify the callClaude function improvements.
- CI/CD: GitHub Actions
- Monitoring: Prometheus and Grafana
`;
} else if (size === 'large') {
// Large PRD with many requirements
content += `
} else if (size === 'large') {
// Large PRD with many requirements
content += `
## Overview
This is a large test PRD to verify the callClaude function improvements.

## Requirements
`;
// Generate 30 requirements
for (let i = 1; i <= 30; i++) {
content += `${i}. Requirement ${i} - This is a detailed description of requirement ${i}.\n`;
}

content += `
// Generate 30 requirements
for (let i = 1; i <= 30; i++) {
content += `${i}. Requirement ${i} - This is a detailed description of requirement ${i}.\n`;
}

content += `
## Technical Stack
- Frontend: React with TypeScript
- Backend: Node.js with Express
@@ -101,12 +101,12 @@ This is a large test PRD to verify the callClaude function improvements.

## User Stories
`;
// Generate 20 user stories
for (let i = 1; i <= 20; i++) {
content += `- As a user, I want to be able to ${i} so that I can achieve benefit ${i}.\n`;
}

content += `
// Generate 20 user stories
for (let i = 1; i <= 20; i++) {
content += `- As a user, I want to be able to ${i} so that I can achieve benefit ${i}.\n`;
}

content += `
## Non-Functional Requirements
- Performance: The system should respond within 200ms
- Scalability: The system should handle 10,000 concurrent users
@@ -114,11 +114,11 @@ This is a large test PRD to verify the callClaude function improvements.
- Security: The system should comply with OWASP top 10
- Accessibility: The system should comply with WCAG 2.1 AA
`;
}

// Add complexity if needed
if (taskComplexity === 'complex') {
content += `
}

// Add complexity if needed
if (taskComplexity === 'complex') {
content += `
## Complex Requirements
- Implement a real-time collaboration system
- Add a machine learning-based recommendation engine
@@ -131,101 +131,110 @@ This is a large test PRD to verify the callClaude function improvements.
- Implement a custom reporting system
- Add a custom dashboard builder
`;
}

return content;
}

return content;
};

// Function to run the tests
async function runTests() {
console.log('Starting tests for callClaude function improvements...');

try {
// Instead of importing the callClaude function directly, we'll use the dev.js script
// with our test PRDs by running it as a child process

// Test 1: Small PRD, 5 tasks
console.log('\n=== Test 1: Small PRD, 5 tasks ===');
const smallPRD = createTestPRD('small', 'simple');
const smallPRDPath = path.join(__dirname, 'test-small-prd.txt');
fs.writeFileSync(smallPRDPath, smallPRD, 'utf8');

console.log(`Created test PRD at ${smallPRDPath}`);
console.log('Running dev.js with small PRD...');

// Use the child_process module to run the dev.js script
const { execSync } = await import('child_process');

try {
const smallResult = execSync(`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${smallPRDPath} --tasks=5`, {
stdio: 'inherit'
});
console.log('Small PRD test completed successfully');
} catch (error) {
console.error('Small PRD test failed:', error.message);
}

// Test 2: Medium PRD, 15 tasks
console.log('\n=== Test 2: Medium PRD, 15 tasks ===');
const mediumPRD = createTestPRD('medium', 'simple');
const mediumPRDPath = path.join(__dirname, 'test-medium-prd.txt');
fs.writeFileSync(mediumPRDPath, mediumPRD, 'utf8');

console.log(`Created test PRD at ${mediumPRDPath}`);
console.log('Running dev.js with medium PRD...');

try {
const mediumResult = execSync(`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${mediumPRDPath} --tasks=15`, {
stdio: 'inherit'
});
console.log('Medium PRD test completed successfully');
} catch (error) {
console.error('Medium PRD test failed:', error.message);
}

// Test 3: Large PRD, 25 tasks
console.log('\n=== Test 3: Large PRD, 25 tasks ===');
const largePRD = createTestPRD('large', 'complex');
const largePRDPath = path.join(__dirname, 'test-large-prd.txt');
fs.writeFileSync(largePRDPath, largePRD, 'utf8');

console.log(`Created test PRD at ${largePRDPath}`);
console.log('Running dev.js with large PRD...');

try {
const largeResult = execSync(`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${largePRDPath} --tasks=25`, {
stdio: 'inherit'
});
console.log('Large PRD test completed successfully');
} catch (error) {
console.error('Large PRD test failed:', error.message);
}

console.log('\nAll tests completed!');
} catch (error) {
console.error('Test failed:', error);
} finally {
// Clean up test files
console.log('\nCleaning up test files...');
const testFiles = [
path.join(__dirname, 'test-small-prd.txt'),
path.join(__dirname, 'test-medium-prd.txt'),
path.join(__dirname, 'test-large-prd.txt')
];

testFiles.forEach(file => {
if (fs.existsSync(file)) {
fs.unlinkSync(file);
console.log(`Deleted ${file}`);
}
});

console.log('Cleanup complete.');
}
console.log('Starting tests for callClaude function improvements...');

try {
// Instead of importing the callClaude function directly, we'll use the dev.js script
// with our test PRDs by running it as a child process

// Test 1: Small PRD, 5 tasks
console.log('\n=== Test 1: Small PRD, 5 tasks ===');
const smallPRD = createTestPRD('small', 'simple');
const smallPRDPath = path.join(__dirname, 'test-small-prd.txt');
fs.writeFileSync(smallPRDPath, smallPRD, 'utf8');

console.log(`Created test PRD at ${smallPRDPath}`);
console.log('Running dev.js with small PRD...');

// Use the child_process module to run the dev.js script
const { execSync } = await import('child_process');

try {
const smallResult = execSync(
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${smallPRDPath} --tasks=5`,
{
stdio: 'inherit'
}
);
console.log('Small PRD test completed successfully');
} catch (error) {
console.error('Small PRD test failed:', error.message);
}

// Test 2: Medium PRD, 15 tasks
console.log('\n=== Test 2: Medium PRD, 15 tasks ===');
const mediumPRD = createTestPRD('medium', 'simple');
const mediumPRDPath = path.join(__dirname, 'test-medium-prd.txt');
fs.writeFileSync(mediumPRDPath, mediumPRD, 'utf8');

console.log(`Created test PRD at ${mediumPRDPath}`);
console.log('Running dev.js with medium PRD...');

try {
const mediumResult = execSync(
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${mediumPRDPath} --tasks=15`,
{
stdio: 'inherit'
}
);
console.log('Medium PRD test completed successfully');
} catch (error) {
console.error('Medium PRD test failed:', error.message);
}

// Test 3: Large PRD, 25 tasks
console.log('\n=== Test 3: Large PRD, 25 tasks ===');
const largePRD = createTestPRD('large', 'complex');
const largePRDPath = path.join(__dirname, 'test-large-prd.txt');
fs.writeFileSync(largePRDPath, largePRD, 'utf8');

console.log(`Created test PRD at ${largePRDPath}`);
console.log('Running dev.js with large PRD...');

try {
const largeResult = execSync(
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${largePRDPath} --tasks=25`,
{
stdio: 'inherit'
}
);
console.log('Large PRD test completed successfully');
} catch (error) {
console.error('Large PRD test failed:', error.message);
}

console.log('\nAll tests completed!');
} catch (error) {
console.error('Test failed:', error);
} finally {
// Clean up test files
console.log('\nCleaning up test files...');
const testFiles = [
path.join(__dirname, 'test-small-prd.txt'),
path.join(__dirname, 'test-medium-prd.txt'),
path.join(__dirname, 'test-large-prd.txt')
];

testFiles.forEach((file) => {
if (fs.existsSync(file)) {
fs.unlinkSync(file);
console.log(`Deleted ${file}`);
}
});

console.log('Cleanup complete.');
}
}

// Run the tests
runTests().catch(error => {
console.error('Error running tests:', error);
process.exit(1);
});
runTests().catch((error) => {
console.error('Error running tests:', error);
process.exit(1);
});

@@ -1,6 +1,6 @@
# Task ID: 1
# Title: Implement Task Data Structure
# Status: done
# Status: in-progress
# Dependencies: None
# Priority: high
# Description: Design and implement the core tasks.json structure that will serve as the single source of truth for the system.

@@ -1,61 +1,142 @@
# Task ID: 23
# Title: Implement MCP Server Functionality for Task Master using FastMCP
# Status: pending
# Title: Complete MCP Server Implementation for Task Master using FastMCP
# Status: in-progress
# Dependencies: 22
# Priority: medium
# Description: Extend Task Master to function as an MCP server by leveraging FastMCP's JavaScript/TypeScript implementation for efficient context management services.
# Description: Finalize the MCP server functionality for Task Master by leveraging FastMCP's capabilities, transitioning from CLI-based execution to direct function imports, and optimizing performance, authentication, and context management. Ensure the server integrates seamlessly with Cursor via `mcp.json` and supports proper tool registration, efficient context handling, and transport type handling (focusing on stdio). Additionally, ensure the server can be instantiated properly when installed via `npx` or `npm i -g`. Evaluate and address gaps in the current implementation, including function imports, context management, caching, tool registration, and adherence to FastMCP best practices.
# Details:
This task involves implementing the Model Context Protocol server capabilities within Task Master using FastMCP. The implementation should:
This task involves completing the Model Context Protocol (MCP) server implementation for Task Master using FastMCP. Key updates include:

1. Use FastMCP to create the MCP server module (`mcp-server.ts` or equivalent)
2. Implement the required MCP endpoints using FastMCP:
- `/context` - For retrieving and updating context
- `/models` - For listing available models
- `/execute` - For executing operations with context
3. Utilize FastMCP's built-in features for context management, including:
- Efficient context storage and retrieval
- Context windowing and truncation
- Metadata and tagging support
4. Add authentication and authorization mechanisms using FastMCP capabilities
5. Implement error handling and response formatting as per MCP specifications
6. Configure Task Master to enable/disable MCP server functionality via FastMCP settings
7. Add documentation on using Task Master as an MCP server with FastMCP
8. Ensure compatibility with existing MCP clients by adhering to FastMCP's compliance features
9. Optimize performance using FastMCP tools, especially for context retrieval operations
10. Add logging for MCP server operations using FastMCP's logging utilities
1. Transition from CLI-based execution (currently using `child_process.spawnSync`) to direct Task Master function imports for improved performance and reliability.
2. Implement caching mechanisms for frequently accessed contexts to enhance performance, leveraging FastMCP's efficient transport mechanisms (e.g., stdio).
3. Refactor context management to align with best practices for handling large context windows, metadata, and tagging.
4. Refactor tool registration in `tools/index.js` to include clear descriptions and parameter definitions, leveraging FastMCP's decorator-based patterns for better integration.
5. Enhance transport type handling to ensure proper stdio communication and compatibility with FastMCP.
6. Ensure the MCP server can be instantiated and run correctly when installed globally via `npx` or `npm i -g`.
7. Integrate the ModelContextProtocol SDK directly to streamline resource and tool registration, ensuring compatibility with FastMCP's transport mechanisms.
8. Identify and address missing components or functionalities to meet FastMCP best practices, such as robust error handling, monitoring endpoints, and concurrency support.
9. Update documentation to include examples of using the MCP server with FastMCP, detailed setup instructions, and client integration guides.

The implementation should follow RESTful API design principles and leverage FastMCP's concurrency handling for multiple client requests. Consider using TypeScript for better type safety and integration with FastMCP[1][2].
The implementation must ensure compatibility with existing MCP clients and follow RESTful API design principles, while supporting concurrent requests and maintaining robust error handling.

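To make items 1, 4 and 5 above concrete, here is a minimal sketch of a tool registered against a directly imported Task Master function instead of a spawned CLI. It is illustrative only: the `registerListTasksTool` name, the import paths, and the parameter set are assumptions, while `server.addTool` with a zod `parameters` schema follows the FastMCP pattern referenced in the subtasks below.

```javascript
import { z } from 'zod';
// Assumed import path; the real module layout may differ.
import { listTasks } from '../../scripts/modules/task-manager.js';

export function registerListTasksTool(server) {
  server.addTool({
    name: 'listTasks',
    description: 'List tasks from tasks.json, optionally filtered by status.',
    parameters: z.object({
      file: z.string().optional().describe('Path to tasks.json'),
      status: z.string().optional().describe('Filter tasks by status'),
      withSubtasks: z.boolean().optional().default(false)
    }),
    execute: async (args) => {
      // Direct import replaces child_process.spawnSync: no process startup,
      // no stdout parsing, and errors propagate as structured objects.
      const tasks = listTasks(
        args.file ?? 'tasks/tasks.json',
        args.status,
        args.withSubtasks,
        'json' // see the listTasks refactor sketch in subtask 8
      );
      return JSON.stringify(tasks);
    }
  });
}
```
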
# Test Strategy:
Testing for the MCP server functionality should include:
Testing for the MCP server implementation will follow a comprehensive approach based on our established testing guidelines:

1. Unit tests:
- Test each MCP endpoint handler function independently using FastMCP
- Verify context storage and retrieval mechanisms provided by FastMCP
- Test authentication and authorization logic
- Validate error handling for various failure scenarios
## Test Organization

2. Integration tests:
- Set up a test MCP server instance using FastMCP
- Test complete request/response cycles for each endpoint
- Verify context persistence across multiple requests
- Test with various payload sizes and content types
1. **Unit Tests** (`tests/unit/mcp-server/`):
- Test individual MCP server components in isolation
- Mock all external dependencies including FastMCP SDK
- Test each tool implementation separately
- Verify direct function imports work correctly
- Test context management and caching mechanisms
- Example files: `context-manager.test.js`, `tool-registration.test.js`, `direct-imports.test.js`

3. Compatibility tests:
- Test with existing MCP client libraries
- Verify compliance with the MCP specification
- Ensure backward compatibility with any MCP versions supported by FastMCP
2. **Integration Tests** (`tests/integration/mcp-server/`):
- Test interactions between MCP server components
- Verify proper tool registration with FastMCP
- Test context flow between components
- Validate error handling across module boundaries
- Example files: `server-tool-integration.test.js`, `context-flow.test.js`

4. Performance tests:
- Measure response times for context operations with various context sizes
- Test concurrent request handling using FastMCP's concurrency tools
- Verify memory usage remains within acceptable limits during extended operation
3. **End-to-End Tests** (`tests/e2e/mcp-server/`):
- Test complete MCP server workflows
- Verify server instantiation via different methods (direct, npx, global install)
- Test actual stdio communication with mock clients
- Example files: `server-startup.e2e.test.js`, `client-communication.e2e.test.js`

5. Security tests:
- Verify authentication mechanisms cannot be bypassed
- Test for common API vulnerabilities (injection, CSRF, etc.)
4. **Test Fixtures** (`tests/fixtures/mcp-server/`):
- Sample context data
- Mock tool definitions
- Sample MCP requests and responses

All tests should be automated and included in the CI/CD pipeline. Documentation should include examples of how to test the MCP server functionality manually using tools like curl or Postman.
## Testing Approach

### Module Mocking Strategy
```javascript
// Mock the FastMCP SDK
jest.mock('@model-context-protocol/sdk', () => ({
  MCPServer: jest.fn().mockImplementation(() => ({
    registerTool: jest.fn(),
    registerResource: jest.fn(),
    start: jest.fn().mockResolvedValue(undefined),
    stop: jest.fn().mockResolvedValue(undefined)
  })),
  MCPError: jest.fn().mockImplementation(function (message, code) {
    this.message = message;
    this.code = code;
  })
}));

// Import modules after mocks
import { MCPServer, MCPError } from '@model-context-protocol/sdk';
import { initMCPServer } from '../../scripts/mcp-server.js';
```

### Context Management Testing
- Test context creation, retrieval, and manipulation
- Verify caching mechanisms work correctly
- Test context windowing and metadata handling
- Validate context persistence across server restarts

### Direct Function Import Testing
- Verify Task Master functions are imported correctly
- Test performance improvements compared to CLI execution
- Validate error handling with direct imports

### Tool Registration Testing
- Verify tools are registered with proper descriptions and parameters
- Test decorator-based registration patterns
- Validate tool execution with different input types

### Error Handling Testing
- Test all error paths with appropriate MCPError types
- Verify error propagation to clients
- Test recovery from various error conditions

### Performance Testing
- Benchmark response times with and without caching
- Test memory usage under load
- Verify concurrent request handling

## Test Quality Guidelines

- Follow TDD approach when possible
- Maintain test independence and isolation
- Use descriptive test names explaining expected behavior
- Aim for 80%+ code coverage, with critical paths at 100%
- Follow the mock-first-then-import pattern for all Jest mocks
- Avoid testing implementation details that might change
- Ensure tests don't depend on execution order

## Specific Test Cases

1. **Server Initialization**
- Test server creation with various configuration options
- Verify proper tool and resource registration
- Test server startup and shutdown procedures

2. **Context Operations**
- Test context creation, retrieval, update, and deletion
- Verify context windowing and truncation
- Test context metadata and tagging

3. **Tool Execution**
- Test each tool with various input parameters
- Verify proper error handling for invalid inputs
- Test tool execution performance

4. **MCP.json Integration**
- Test creation and updating of .cursor/mcp.json
- Verify proper server registration in mcp.json
- Test handling of existing mcp.json files

5. **Transport Handling**
- Test stdio communication
- Verify proper message formatting
- Test error handling in transport layer

All tests will be automated and integrated into the CI/CD pipeline to ensure consistent quality.

# Subtasks:
## 1. Create Core MCP Server Module and Basic Structure [done]
@@ -79,7 +160,7 @@ Testing approach:
- Test basic error handling with invalid requests

## 2. Implement Context Management System [done]
### Dependencies: [32m[1m23.1[22m[39m
### Dependencies: 23.1
### Description: Develop a robust context management system that can efficiently store, retrieve, and manipulate context data according to the MCP specification.
### Details:
Implementation steps:
@@ -100,7 +181,7 @@ Testing approach:
- Test persistence mechanisms with simulated failures

## 3. Implement MCP Endpoints and API Handlers [done]
### Dependencies: [32m[1m23.1[22m[39m, [32m[1m23.2[22m[39m
### Dependencies: 23.1, 23.2
### Description: Develop the complete API handlers for all required MCP endpoints, ensuring they follow the protocol specification and integrate with the context management system.
### Details:
Implementation steps:
@@ -125,49 +206,233 @@ Testing approach:
- Test error handling with invalid inputs
- Benchmark endpoint performance

## 4. Implement Authentication and Authorization System [pending]
### Dependencies: [32m[1m23.1[22m[39m, [32m[1m23.3[22m[39m
### Description: Create a secure authentication and authorization mechanism for MCP clients to ensure only authorized applications can access the MCP server functionality.
## 6. Refactor MCP Server to Leverage ModelContextProtocol SDK [deferred]
### Dependencies: 23.1, 23.2, 23.3
### Description: Integrate the ModelContextProtocol SDK directly into the MCP server implementation to streamline tool registration and resource handling.
### Details:
Implementation steps:
1. Design authentication scheme (API keys, OAuth, JWT, etc.)
2. Implement authentication middleware for all MCP endpoints
3. Create an API key management system for client applications
4. Develop role-based access control for different operations
5. Implement rate limiting to prevent abuse
6. Add secure token validation and handling
7. Create endpoints for managing client credentials
8. Implement audit logging for authentication events
1. Replace manual tool registration with ModelContextProtocol SDK methods.
2. Use SDK utilities to simplify resource and template management.
3. Ensure compatibility with FastMCP's transport mechanisms.
4. Update server initialization to include SDK-based configurations.

Testing approach:
- Security testing for authentication mechanisms
- Test access control with various permission levels
- Verify rate limiting functionality
- Test token validation with valid and invalid tokens
- Simulate unauthorized access attempts
- Verify audit logs contain appropriate information
- Verify SDK integration with all MCP endpoints.
- Test resource and template registration using SDK methods.
- Validate compatibility with existing MCP clients.
- Benchmark performance improvements from SDK integration.

## 5. Optimize Performance and Finalize Documentation [pending]
### Dependencies: [32m[1m23.1[22m[39m, [32m[1m23.2[22m[39m, [32m[1m23.3[22m[39m, [31m[1m23.4[22m[39m
### Description: Optimize the MCP server implementation for performance, especially for context retrieval operations, and create comprehensive documentation for users.
## 8. Implement Direct Function Imports and Replace CLI-based Execution [done]
### Dependencies: 23.13
### Description: Refactor the MCP server implementation to use direct Task Master function imports instead of the current CLI-based execution using child_process.spawnSync. This will improve performance, reliability, and enable better error handling.
### Details:
Implementation steps:
1. Profile the MCP server to identify performance bottlenecks
2. Implement caching mechanisms for frequently accessed contexts
3. Optimize context serialization and deserialization
4. Add connection pooling for database operations (if applicable)
5. Implement request batching for bulk operations
6. Create comprehensive API documentation with examples
7. Add setup and configuration guides to the Task Master documentation
8. Create example client implementations
9. Add monitoring endpoints for server health and metrics
10. Implement graceful degradation under high load

Testing approach:
- Load testing with simulated concurrent clients
- Measure response times for various operations
- Test with large context sizes to verify performance
- Verify documentation accuracy with sample requests
- Test monitoring endpoints
- Perform stress testing to identify failure points

<info added on 2025-03-30T00:14:10.040Z>
# Refactoring Strategy for Direct Function Imports

## Core Approach
1. Create a clear separation between data retrieval/processing and presentation logic
2. Modify function signatures to accept `outputFormat` parameter ('cli'|'json', default: 'cli')
3. Implement early returns for JSON format to bypass CLI-specific code

## Implementation Details for `listTasks`
```javascript
function listTasks(tasksPath, statusFilter, withSubtasks = false, outputFormat = 'cli') {
  try {
    // Existing data retrieval logic
    const filteredTasks = /* ... */;

    // Early return for JSON format
    if (outputFormat === 'json') return filteredTasks;

    // Existing CLI output logic
  } catch (error) {
    if (outputFormat === 'json') {
      throw {
        code: 'TASK_LIST_ERROR',
        message: error.message,
        details: error.stack
      };
    } else {
      console.error(error);
      process.exit(1);
    }
  }
}
```

## Testing Strategy
- Create integration tests in `tests/integration/mcp-server/`
- Use FastMCP InMemoryTransport for direct client-server testing
- Test both JSON and CLI output formats
- Verify structure consistency with schema validation

## Additional Considerations
- Update JSDoc comments to document new parameters and return types
- Ensure backward compatibility with default CLI behavior
- Add JSON schema validation for consistent output structure
- Apply similar pattern to other core functions (expandTask, updateTaskById, etc.)

## Error Handling Improvements
- Standardize error format for JSON returns:
```javascript
{
  code: 'ERROR_CODE',
  message: 'Human-readable message',
  details: {}, // Additional context when available
  stack: process.env.NODE_ENV === 'development' ? error.stack : undefined
}
```
- Enrich JSON errors with error codes and debug info
- Ensure validation failures return proper objects in JSON mode
</info added on 2025-03-30T00:14:10.040Z>

## 9. Implement Context Management and Caching Mechanisms [done]
### Dependencies: 23.1
### Description: Enhance the MCP server with proper context management and caching to improve performance and user experience, especially for frequently accessed data and contexts.
### Details:
1. Implement a context manager class that leverages FastMCP's Context object
2. Add caching for frequently accessed task data with configurable TTL settings (see the sketch below)
3. Implement context tagging for better organization of context data
4. Add methods to efficiently handle large context windows
5. Create helper functions for storing and retrieving context data
6. Implement cache invalidation strategies for task updates
7. Add cache statistics for monitoring performance
8. Create unit tests for context management and caching functionality

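For reference, a minimal sketch of the cache described in items 2, 6 and 7 (the `ContextCache` name and method shapes are illustrative, not a final API):

```javascript
// Illustrative in-memory cache with TTL, invalidation, and hit statistics.
class ContextCache {
  constructor(ttlMs = 60_000) {
    this.ttlMs = ttlMs;
    this.store = new Map(); // key -> { value, expiresAt }
    this.stats = { hits: 0, misses: 0 };
  }

  get(key) {
    const entry = this.store.get(key);
    if (!entry || entry.expiresAt < Date.now()) {
      this.store.delete(key); // drop expired entries lazily
      this.stats.misses++;
      return undefined;
    }
    this.stats.hits++;
    return entry.value;
  }

  set(key, value) {
    this.store.set(key, { value, expiresAt: Date.now() + this.ttlMs });
  }

  // Called when a task changes so stale entries are dropped (item 6).
  invalidate(prefix) {
    for (const key of this.store.keys()) {
      if (key.startsWith(prefix)) this.store.delete(key);
    }
  }
}
```
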
## 10. Enhance Tool Registration and Resource Management [deferred]
### Dependencies: 23.1, 23.8
### Description: Refactor tool registration to follow FastMCP best practices, using decorators and improving the overall structure. Implement proper resource management for task templates and other shared resources.
### Details:
1. Update registerTaskMasterTools function to use FastMCP's decorator pattern
2. Implement @mcp.tool() decorators for all existing tools
3. Add proper type annotations and documentation for all tools
4. Create resource handlers for task templates using @mcp.resource()
5. Implement resource templates for common task patterns
6. Update the server initialization to properly register all tools and resources
7. Add validation for tool inputs using FastMCP's built-in validation
8. Create comprehensive tests for tool registration and resource access

## 11. Implement Comprehensive Error Handling [deferred]
### Dependencies: 23.1, 23.3
### Description: Implement robust error handling using FastMCP's MCPError, including custom error types for different categories and standardized error responses.
### Details:
1. Create custom error types extending MCPError for different categories (validation, auth, etc.) (sketched below)
2. Implement standardized error responses following MCP protocol
3. Add error handling middleware for all MCP endpoints
4. Ensure proper error propagation from tools to client
5. Add debug mode with detailed error information
6. Document error types and handling patterns

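A rough sketch of steps 1 and 2, assuming `MCPError` is importable with a `(message, code)` constructor as mocked in the test strategy above; the class names and error codes are placeholders:

```javascript
import { MCPError } from '@model-context-protocol/sdk';

class ValidationError extends MCPError {
  constructor(message, details = {}) {
    super(message, 'VALIDATION_ERROR');
    this.details = details;
  }
}

class AuthError extends MCPError {
  constructor(message) {
    super(message, 'AUTH_ERROR');
  }
}

// Standardized response shape, mirroring the JSON error format in subtask 8.
function toErrorResponse(error) {
  return {
    success: false,
    error: { code: error.code ?? 'INTERNAL_ERROR', message: error.message }
  };
}
```
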
## 12. Implement Structured Logging System [deferred]
### Dependencies: 23.1, 23.3
### Description: Implement a comprehensive logging system for the MCP server with different log levels, structured logging format, and request/response tracking.
### Details:
1. Design structured log format for consistent parsing (sketched below)
2. Implement different log levels (debug, info, warn, error)
3. Add request/response logging middleware
4. Implement correlation IDs for request tracking
5. Add performance metrics logging
6. Configure log output destinations (console, file)
7. Document logging patterns and usage

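A minimal sketch of steps 1, 2 and 4; the field names and the stderr destination are assumptions (writing to stderr keeps structured logs out of the stdio transport channel):

```javascript
const LEVELS = { debug: 0, info: 1, warn: 2, error: 3 };

function createLogger({ level = 'info', correlationId } = {}) {
  const threshold = LEVELS[level];
  const log = (lvl, message, fields = {}) => {
    if (LEVELS[lvl] < threshold) return;
    // One JSON object per line keeps the output machine-parseable.
    console.error(
      JSON.stringify({
        ts: new Date().toISOString(),
        level: lvl,
        correlationId,
        message,
        ...fields
      })
    );
  };
  return {
    debug: (m, f) => log('debug', m, f),
    info: (m, f) => log('info', m, f),
    warn: (m, f) => log('warn', m, f),
    error: (m, f) => log('error', m, f)
  };
}
```
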
## 13. Create Testing Framework and Test Suite [deferred]
### Dependencies: 23.1, 23.3
### Description: Implement a comprehensive testing framework for the MCP server, including unit tests, integration tests, and end-to-end tests.
### Details:
1. Set up Jest testing framework with proper configuration
2. Create MCPTestClient for testing FastMCP server interaction
3. Implement unit tests for individual tool functions
4. Create integration tests for end-to-end request/response cycles
5. Set up test fixtures and mock data
6. Implement test coverage reporting
7. Document testing guidelines and examples

## 14. Add MCP.json to the Init Workflow [done]
### Dependencies: 23.1, 23.3
### Description: Implement functionality to create or update .cursor/mcp.json during project initialization, handling two cases: 1) if there is no mcp.json, create it with the appropriate configuration; 2) if there is an mcp.json, intelligently append to it without syntax errors like trailing commas.
### Details:
1. Create functionality to detect if .cursor/mcp.json exists in the project
2. Implement logic to create a new mcp.json file with proper structure if it doesn't exist
3. Add functionality to read and parse existing mcp.json if it exists
4. Create method to add a new taskmaster-ai server entry to the mcpServers object
5. Implement intelligent JSON merging that avoids trailing commas and syntax errors (see the sketch below)
6. Ensure proper formatting and indentation in the generated/updated JSON
7. Add validation to verify the updated configuration is valid JSON
8. Include this functionality in the init workflow
9. Add error handling for file system operations and JSON parsing
10. Document the mcp.json structure and integration process

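A sketch of the create-or-merge flow described above; the helper name is hypothetical, and the `taskmaster-ai` entry shape follows the project's `.cursor/mcp.json` convention:

```javascript
import fs from 'fs';
import path from 'path';

function ensureMcpJson(projectRoot) {
  const mcpPath = path.join(projectRoot, '.cursor', 'mcp.json');
  let config = { mcpServers: {} };

  if (fs.existsSync(mcpPath)) {
    // JSON.parse rejects trailing commas, surfacing syntax errors early.
    config = JSON.parse(fs.readFileSync(mcpPath, 'utf8'));
    config.mcpServers = config.mcpServers ?? {};
  } else {
    fs.mkdirSync(path.dirname(mcpPath), { recursive: true });
  }

  // Add or overwrite the taskmaster-ai entry without touching other servers.
  config.mcpServers['taskmaster-ai'] = {
    command: 'node',
    args: ['./mcp-server/server.js']
  };

  // Re-serializing guarantees valid, consistently indented JSON output.
  fs.writeFileSync(mcpPath, JSON.stringify(config, null, 2) + '\n', 'utf8');
}
```
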
## 15. Implement SSE Support for Real-time Updates [deferred]
### Dependencies: 23.1, 23.3, 23.11
### Description: Add Server-Sent Events (SSE) capabilities to the MCP server to enable real-time updates and streaming of task execution progress, logs, and status changes to clients.
### Details:
1. Research and implement SSE protocol for the MCP server
2. Create dedicated SSE endpoints for event streaming (see the sketch below)
3. Implement event emitter pattern for internal event management
4. Add support for different event types (task status, logs, errors)
5. Implement client connection management with proper keep-alive handling
6. Add filtering capabilities to allow subscribing to specific event types
7. Create in-memory event buffer for clients reconnecting
8. Document SSE endpoint usage and client implementation examples
9. Add robust error handling for dropped connections
10. Implement rate limiting and backpressure mechanisms
11. Add authentication for SSE connections

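A rough sketch of steps 2, 3 and 5, using an Express-style handler and a plain Node `EventEmitter`; both are illustrative assumptions rather than the final transport integration:

```javascript
import { EventEmitter } from 'events';

// Assumed internal event bus for task events (step 3).
const taskEvents = new EventEmitter();

function sseHandler(req, res) {
  res.writeHead(200, {
    'Content-Type': 'text/event-stream',
    'Cache-Control': 'no-cache',
    Connection: 'keep-alive'
  });

  const send = (event, data) => {
    res.write(`event: ${event}\n`);
    res.write(`data: ${JSON.stringify(data)}\n\n`);
  };

  // Comment frames every 15s keep proxies from closing an idle stream (step 5).
  const keepAlive = setInterval(() => res.write(': ping\n\n'), 15000);

  const onStatus = (payload) => send('task-status', payload);
  taskEvents.on('status', onStatus);

  req.on('close', () => {
    clearInterval(keepAlive);
    taskEvents.off('status', onStatus);
  });
}
```
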
## 16. Implement parse-prd MCP command [pending]
### Dependencies: None
### Description: Create direct function wrapper and MCP tool for parsing PRD documents to generate tasks.
### Details:
Following MCP implementation standards:

1. Create parsePRDDirect function in task-master-core.js (sketched below):
   - Import parsePRD from task-manager.js
   - Handle file paths using findTasksJsonPath utility
   - Process arguments: input file, output path, numTasks
   - Validate inputs and handle errors with try/catch
   - Return standardized { success, data/error } object
   - Add to directFunctions map

2. Create parse-prd.js MCP tool in mcp-server/src/tools/:
   - Import z from zod for parameter schema
   - Import executeMCPToolAction from ./utils.js
   - Import parsePRDDirect from task-master-core.js
   - Define parameters matching CLI options using zod schema
   - Implement registerParsePRDTool(server) with server.addTool
   - Use executeMCPToolAction in execute method

3. Register in tools/index.js

4. Add to .cursor/mcp.json with appropriate schema

5. Write tests following testing guidelines:
   - Unit test for parsePRDDirect
   - Integration test for MCP tool

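A sketch of the wrapper from step 1 under the stated standards; the import paths and the `findTasksJsonPath` signature are assumptions:

```javascript
import { parsePRD } from '../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from './utils.js'; // hypothetical helper location

export async function parsePRDDirect(args, log) {
  try {
    if (!args.input) {
      return {
        success: false,
        error: { code: 'MISSING_INPUT', message: 'An input PRD file is required' }
      };
    }
    // Resolve the output tasks.json path (signature assumed).
    const outputPath = args.output ?? findTasksJsonPath(args.projectRoot);
    const tasks = await parsePRD(args.input, outputPath, args.numTasks);
    return { success: true, data: { tasks } };
  } catch (error) {
    log?.error?.(`parse-prd failed: ${error.message}`);
    return {
      success: false,
      error: { code: 'PARSE_PRD_ERROR', message: error.message }
    };
  }
}
```

Subtasks 17 to 24 below repeat this same wrapper, tool, registration, and test pattern, varying only the imported function and the argument list.
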
## 17. Implement update MCP command [pending]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for updating multiple tasks based on prompt.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create updateTasksDirect function in task-master-core.js:\n - Import updateTasks from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: fromId, prompt, useResearch\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create update.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import updateTasksDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerUpdateTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for updateTasksDirect\n - Integration test for MCP tool
|
||||
|
||||
## 18. Implement update-task MCP command [pending]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for updating a single task by ID with new information.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create updateTaskByIdDirect function in task-master-core.js:\n - Import updateTaskById from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: taskId, prompt, useResearch\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create update-task.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import updateTaskByIdDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerUpdateTaskTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for updateTaskByIdDirect\n - Integration test for MCP tool
|
||||
|
||||
## 19. Implement update-subtask MCP command [pending]
|
||||
### Dependencies: None
|
||||
### Description: Create direct function wrapper and MCP tool for appending information to a specific subtask.
|
||||
### Details:
|
||||
Following MCP implementation standards:\n\n1. Create updateSubtaskByIdDirect function in task-master-core.js:\n - Import updateSubtaskById from task-manager.js\n - Handle file paths using findTasksJsonPath utility\n - Process arguments: subtaskId, prompt, useResearch\n - Validate inputs and handle errors with try/catch\n - Return standardized { success, data/error } object\n - Add to directFunctions map\n\n2. Create update-subtask.js MCP tool in mcp-server/src/tools/:\n - Import z from zod for parameter schema\n - Import executeMCPToolAction from ./utils.js\n - Import updateSubtaskByIdDirect from task-master-core.js\n - Define parameters matching CLI options using zod schema\n - Implement registerUpdateSubtaskTool(server) with server.addTool\n - Use executeMCPToolAction in execute method\n\n3. Register in tools/index.js\n\n4. Add to .cursor/mcp.json with appropriate schema\n\n5. Write tests following testing guidelines:\n - Unit test for updateSubtaskByIdDirect\n - Integration test for MCP tool
|
||||
|
||||
## 20. Implement generate MCP command [pending]

### Dependencies: None

### Description: Create direct function wrapper and MCP tool for generating task files from tasks.json.

### Details:
Following MCP implementation standards:

1. Create generateTaskFilesDirect function in task-master-core.js:
   - Import generateTaskFiles from task-manager.js
   - Handle file paths using findTasksJsonPath utility
   - Process arguments: tasksPath, outputDir
   - Validate inputs and handle errors with try/catch
   - Return standardized { success, data/error } object
   - Add to directFunctions map

2. Create generate.js MCP tool in mcp-server/src/tools/:
   - Import z from zod for parameter schema
   - Import executeMCPToolAction from ./utils.js
   - Import generateTaskFilesDirect from task-master-core.js
   - Define parameters matching CLI options using zod schema
   - Implement registerGenerateTool(server) with server.addTool
   - Use executeMCPToolAction in execute method

3. Register in tools/index.js

4. Add to .cursor/mcp.json with appropriate schema

5. Write tests following testing guidelines:
   - Unit test for generateTaskFilesDirect
   - Integration test for MCP tool

## 21. Implement set-status MCP command [pending]

### Dependencies: None

### Description: Create direct function wrapper and MCP tool for setting task status.

### Details:
Following MCP implementation standards:

1. Create setTaskStatusDirect function in task-master-core.js:
   - Import setTaskStatus from task-manager.js
   - Handle file paths using findTasksJsonPath utility
   - Process arguments: taskId, status
   - Validate inputs and handle errors with try/catch
   - Return standardized { success, data/error } object
   - Add to directFunctions map

2. Create set-status.js MCP tool in mcp-server/src/tools/:
   - Import z from zod for parameter schema
   - Import executeMCPToolAction from ./utils.js
   - Import setTaskStatusDirect from task-master-core.js
   - Define parameters matching CLI options using zod schema
   - Implement registerSetStatusTool(server) with server.addTool
   - Use executeMCPToolAction in execute method

3. Register in tools/index.js

4. Add to .cursor/mcp.json with appropriate schema

5. Write tests following testing guidelines:
   - Unit test for setTaskStatusDirect
   - Integration test for MCP tool

## 22. Implement show-task MCP command [pending]

### Dependencies: None

### Description: Create direct function wrapper and MCP tool for showing task details.

### Details:
Following MCP implementation standards:

1. Create showTaskDirect function in task-master-core.js:
   - Import showTask from task-manager.js
   - Handle file paths using findTasksJsonPath utility
   - Process arguments: taskId
   - Validate inputs and handle errors with try/catch
   - Return standardized { success, data/error } object
   - Add to directFunctions map

2. Create show-task.js MCP tool in mcp-server/src/tools/:
   - Import z from zod for parameter schema
   - Import executeMCPToolAction from ./utils.js
   - Import showTaskDirect from task-master-core.js
   - Define parameters matching CLI options using zod schema
   - Implement registerShowTaskTool(server) with server.addTool
   - Use executeMCPToolAction in execute method

3. Register in tools/index.js

4. Add to .cursor/mcp.json with appropriate schema

5. Write tests following testing guidelines:
   - Unit test for showTaskDirect
   - Integration test for MCP tool

## 23. Implement next-task MCP command [pending]

### Dependencies: None

### Description: Create direct function wrapper and MCP tool for finding the next task to work on.

### Details:
Following MCP implementation standards:

1. Create nextTaskDirect function in task-master-core.js:
   - Import nextTask from task-manager.js
   - Handle file paths using findTasksJsonPath utility
   - Process arguments (no specific args needed except projectRoot/file)
   - Handle errors with try/catch
   - Return standardized { success, data/error } object
   - Add to directFunctions map

2. Create next-task.js MCP tool in mcp-server/src/tools/:
   - Import z from zod for parameter schema
   - Import executeMCPToolAction from ./utils.js
   - Import nextTaskDirect from task-master-core.js
   - Define parameters matching CLI options using zod schema
   - Implement registerNextTaskTool(server) with server.addTool
   - Use executeMCPToolAction in execute method

3. Register in tools/index.js

4. Add to .cursor/mcp.json with appropriate schema

5. Write tests following testing guidelines:
   - Unit test for nextTaskDirect
   - Integration test for MCP tool

## 24. Implement expand-task MCP command [pending]

### Dependencies: None

### Description: Create direct function wrapper and MCP tool for expanding a task into subtasks.

### Details:
Following MCP implementation standards:

1. Create expandTaskDirect function in task-master-core.js:
   - Import expandTask from task-manager.js
   - Handle file paths using findTasksJsonPath utility
   - Process arguments: taskId, prompt, num, force, research
   - Validate inputs and handle errors with try/catch
   - Return standardized { success, data/error } object
   - Add to directFunctions map

2. Create expand-task.js MCP tool in mcp-server/src/tools/:
   - Import z from zod for parameter schema
   - Import executeMCPToolAction from ./utils.js
   - Import expandTaskDirect from task-master-core.js
   - Define parameters matching CLI options using zod schema
   - Implement registerExpandTaskTool(server) with server.addTool
   - Use executeMCPToolAction in execute method

3. Register in tools/index.js

4. Add to .cursor/mcp.json with appropriate schema

5. Write tests following testing guidelines:
   - Unit test for expandTaskDirect
   - Integration test for MCP tool

## 25. Implement add-task MCP command [pending]

### Dependencies: None

### Description: Create direct function wrapper and MCP tool for adding new tasks.

### Details:
Following MCP implementation standards:

1. Create addTaskDirect function in task-master-core.js:
   - Import addTask from task-manager.js
   - Handle file paths using findTasksJsonPath utility
   - Process arguments: prompt, priority, dependencies
   - Validate inputs and handle errors with try/catch
   - Return standardized { success, data/error } object
   - Add to directFunctions map

2. Create add-task.js MCP tool in mcp-server/src/tools/:
   - Import z from zod for parameter schema
   - Import executeMCPToolAction from ./utils.js
   - Import addTaskDirect from task-master-core.js
   - Define parameters matching CLI options using zod schema
   - Implement registerAddTaskTool(server) with server.addTool
   - Use executeMCPToolAction in execute method

3. Register in tools/index.js

4. Add to .cursor/mcp.json with appropriate schema

5. Write tests following testing guidelines:
   - Unit test for addTaskDirect
   - Integration test for MCP tool

## 26. Implement add-subtask MCP command [pending]

### Dependencies: None

### Description: Create direct function wrapper and MCP tool for adding subtasks to existing tasks.

### Details:
Following MCP implementation standards:

1. Create addSubtaskDirect function in task-master-core.js:
   - Import addSubtask from task-manager.js
   - Handle file paths using findTasksJsonPath utility
   - Process arguments: parentTaskId, title, description, details
   - Validate inputs and handle errors with try/catch
   - Return standardized { success, data/error } object
   - Add to directFunctions map

2. Create add-subtask.js MCP tool in mcp-server/src/tools/:
   - Import z from zod for parameter schema
   - Import executeMCPToolAction from ./utils.js
   - Import addSubtaskDirect from task-master-core.js
   - Define parameters matching CLI options using zod schema
   - Implement registerAddSubtaskTool(server) with server.addTool
   - Use executeMCPToolAction in execute method

3. Register in tools/index.js

4. Add to .cursor/mcp.json with appropriate schema

5. Write tests following testing guidelines:
   - Unit test for addSubtaskDirect
   - Integration test for MCP tool

## 27. Implement remove-subtask MCP command [pending]

### Dependencies: None

### Description: Create direct function wrapper and MCP tool for removing subtasks from tasks.

### Details:
Following MCP implementation standards:

1. Create removeSubtaskDirect function in task-master-core.js:
   - Import removeSubtask from task-manager.js
   - Handle file paths using findTasksJsonPath utility
   - Process arguments: parentTaskId, subtaskId
   - Validate inputs and handle errors with try/catch
   - Return standardized { success, data/error } object
   - Add to directFunctions map

2. Create remove-subtask.js MCP tool in mcp-server/src/tools/:
   - Import z from zod for parameter schema
   - Import executeMCPToolAction from ./utils.js
   - Import removeSubtaskDirect from task-master-core.js
   - Define parameters matching CLI options using zod schema
   - Implement registerRemoveSubtaskTool(server) with server.addTool
   - Use executeMCPToolAction in execute method

3. Register in tools/index.js

4. Add to .cursor/mcp.json with appropriate schema

5. Write tests following testing guidelines:
   - Unit test for removeSubtaskDirect
   - Integration test for MCP tool

## 28. Implement analyze MCP command [pending]

### Dependencies: None

### Description: Create direct function wrapper and MCP tool for analyzing task complexity.

### Details:
Following MCP implementation standards:

1. Create analyzeTaskComplexityDirect function in task-master-core.js:
   - Import analyzeTaskComplexity from task-manager.js
   - Handle file paths using findTasksJsonPath utility
   - Process arguments: taskId
   - Validate inputs and handle errors with try/catch
   - Return standardized { success, data/error } object
   - Add to directFunctions map

2. Create analyze.js MCP tool in mcp-server/src/tools/:
   - Import z from zod for parameter schema
   - Import executeMCPToolAction from ./utils.js
   - Import analyzeTaskComplexityDirect from task-master-core.js
   - Define parameters matching CLI options using zod schema
   - Implement registerAnalyzeTool(server) with server.addTool
   - Use executeMCPToolAction in execute method

3. Register in tools/index.js

4. Add to .cursor/mcp.json with appropriate schema

5. Write tests following testing guidelines:
   - Unit test for analyzeTaskComplexityDirect
   - Integration test for MCP tool

## 29. Implement clear-subtasks MCP command [pending]

### Dependencies: None

### Description: Create direct function wrapper and MCP tool for clearing subtasks from a parent task.

### Details:
Following MCP implementation standards:

1. Create clearSubtasksDirect function in task-master-core.js:
   - Import clearSubtasks from task-manager.js
   - Handle file paths using findTasksJsonPath utility
   - Process arguments: taskId
   - Validate inputs and handle errors with try/catch
   - Return standardized { success, data/error } object
   - Add to directFunctions map

2. Create clear-subtasks.js MCP tool in mcp-server/src/tools/:
   - Import z from zod for parameter schema
   - Import executeMCPToolAction from ./utils.js
   - Import clearSubtasksDirect from task-master-core.js
   - Define parameters matching CLI options using zod schema
   - Implement registerClearSubtasksTool(server) with server.addTool
   - Use executeMCPToolAction in execute method

3. Register in tools/index.js

4. Add to .cursor/mcp.json with appropriate schema

5. Write tests following testing guidelines:
   - Unit test for clearSubtasksDirect
   - Integration test for MCP tool

## 30. Implement expand-all MCP command [pending]

### Dependencies: None

### Description: Create direct function wrapper and MCP tool for expanding all tasks into subtasks.

### Details:
Following MCP implementation standards:

1. Create expandAllTasksDirect function in task-master-core.js:
   - Import expandAllTasks from task-manager.js
   - Handle file paths using findTasksJsonPath utility
   - Process arguments: prompt, num, force, research
   - Validate inputs and handle errors with try/catch
   - Return standardized { success, data/error } object
   - Add to directFunctions map

2. Create expand-all.js MCP tool in mcp-server/src/tools/:
   - Import z from zod for parameter schema
   - Import executeMCPToolAction from ./utils.js
   - Import expandAllTasksDirect from task-master-core.js
   - Define parameters matching CLI options using zod schema
   - Implement registerExpandAllTool(server) with server.addTool
   - Use executeMCPToolAction in execute method

3. Register in tools/index.js

4. Add to .cursor/mcp.json with appropriate schema

5. Write tests following testing guidelines:
   - Unit test for expandAllTasksDirect
   - Integration test for MCP tool
@@ -1,56 +1,231 @@
# Task ID: 32
# Title: Implement 'learn' Command for Automatic Cursor Rule Generation
# Title: Implement "learn" Command for Automatic Cursor Rule Generation
# Status: pending
# Dependencies: None
# Priority: high
# Description: Create a new 'learn' command that analyzes code changes and chat history to automatically generate or update Cursor rules in the .cursor/rules directory based on successful implementation patterns.
# Description: Create a new "learn" command that analyzes Cursor's chat history and code changes to automatically generate or update rule files in the .cursor/rules directory, following the cursor_rules.mdc template format. This command will help Cursor autonomously improve its ability to follow development standards by learning from successful implementations.
# Details:
Implement a new command in the task-master CLI that enables Cursor to learn from successful coding patterns:
Implement a new command in the task-master CLI that enables Cursor to learn from successful coding patterns and chat interactions:

1. Create a new module `commands/learn.js` that implements the command logic
2. Update `index.js` to register the new command
3. The command should:
   - Accept an optional parameter for specifying which patterns to focus on
   - Use git diff to extract code changes since the last commit
   - Access the Cursor chat history if possible (investigate API or file storage location)
   - Call Claude via ai-services.js with the following context:
     * Code diffs
     * Chat history excerpts showing challenges and solutions
     * Existing rules from .cursor/rules if present
   - Parse Claude's response to extract rule definitions
   - Create or update .mdc files in the .cursor/rules directory
   - Provide a summary of what was learned and which rules were updated

Key Components:

1. Cursor Data Analysis
   - Access and parse Cursor's chat history from ~/Library/Application Support/Cursor/User/History
   - Extract relevant patterns, corrections, and successful implementations
   - Track file changes and their associated chat context

4. Create helper functions to:
   - Extract relevant patterns from diffs
   - Format the prompt for Claude to focus on identifying reusable patterns
   - Parse Claude's response into valid rule definitions
   - Handle rule conflicts or duplications

2. Rule Management
   - Use cursor_rules.mdc as the template for all rule file formatting
   - Manage rule files in .cursor/rules directory
   - Support both creation and updates of rule files
   - Categorize rules based on context (testing, components, API, etc.)

5. Ensure the command handles errors gracefully, especially if chat history is inaccessible
6. Add appropriate logging to show the learning process
7. Document the command in the README.md file

3. AI Integration
   - Utilize ai-services.js to interact with Claude
   - Provide comprehensive context including:
     * Relevant chat history showing the evolution of solutions
     * Code changes and their outcomes
     * Existing rules and template structure
   - Generate or update rules while maintaining template consistency

4. Implementation Requirements:
   - Automatic triggering after task completion (configurable)
   - Manual triggering via CLI command
   - Proper error handling for missing or corrupt files
   - Validation against cursor_rules.mdc template
   - Performance optimization for large histories
   - Clear logging and progress indication

5. Key Files:
   - commands/learn.js: Main command implementation
   - rules/cursor-rules-manager.js: Rule file management
   - utils/chat-history-analyzer.js: Cursor chat analysis
   - index.js: Command registration

6. Security Considerations:
   - Safe file system operations
   - Proper error handling for inaccessible files
   - Validation of generated rules
   - Backup of existing rules before updates
# Test Strategy:
1. Unit tests:
   - Create tests for each helper function in isolation
   - Mock git diff responses and chat history data
   - Verify rule extraction logic works with different input patterns
   - Test error handling for various failure scenarios

1. Unit Tests:
   - Test each component in isolation:
     * Chat history extraction and analysis
     * Rule file management and validation
     * Pattern detection and categorization
     * Template validation logic
   - Mock file system operations and AI responses
   - Test error handling and edge cases

2. Integration tests:
   - Test the command in a repository with actual code changes
   - Verify it correctly generates .mdc files in the .cursor/rules directory
   - Check that generated rules follow the correct format
   - Verify the command correctly updates existing rules without losing custom modifications

2. Integration Tests:
   - End-to-end command execution
   - File system interactions
   - AI service integration
   - Rule generation and updates
   - Template compliance validation

3. Manual testing scenarios:
   - Run the command after implementing a feature with specific patterns
   - Verify the generated rules capture the intended patterns
   - Test the command with and without existing rules
   - Verify the command works when chat history is available and when it isn't
   - Test with large diffs to ensure performance remains acceptable

3. Manual Testing:
   - Test after completing actual development tasks
   - Verify rule quality and usefulness
   - Check template compliance
   - Validate performance with large histories
   - Test automatic and manual triggering

4. Validation Criteria:
   - Generated rules follow cursor_rules.mdc format
   - Rules capture meaningful patterns
   - Performance remains acceptable
   - Error handling works as expected
   - Generated rules improve Cursor's effectiveness

# Subtasks:
## 1. Create Initial File Structure [pending]

### Dependencies: None

### Description: Set up the basic file structure for the learn command implementation

### Details:
Create the following files with basic exports:
- commands/learn.js
- rules/cursor-rules-manager.js
- utils/chat-history-analyzer.js
- utils/cursor-path-helper.js

## 2. Implement Cursor Path Helper [pending]

### Dependencies: None

### Description: Create utility functions to handle Cursor's application data paths

### Details:
In utils/cursor-path-helper.js implement (a sketch follows below):
- getCursorAppDir(): Returns ~/Library/Application Support/Cursor
- getCursorHistoryDir(): Returns User/History path
- getCursorLogsDir(): Returns logs directory path
- validatePaths(): Ensures required directories exist
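A minimal sketch of what these helpers might look like, assuming the macOS paths the subtask names; other platforms would need their own branches:

```js
// Minimal sketch of utils/cursor-path-helper.js (macOS paths only)
import os from 'os';
import path from 'path';
import fs from 'fs';

export function getCursorAppDir() {
  return path.join(os.homedir(), 'Library', 'Application Support', 'Cursor');
}

export function getCursorHistoryDir() {
  return path.join(getCursorAppDir(), 'User', 'History');
}

export function getCursorLogsDir() {
  return path.join(getCursorAppDir(), 'logs');
}

export function validatePaths() {
  // Report which required directories are missing rather than throwing,
  // so the learn command can degrade gracefully when history is absent.
  const required = [getCursorAppDir(), getCursorHistoryDir()];
  const missing = required.filter((dir) => !fs.existsSync(dir));
  return { valid: missing.length === 0, missing };
}
```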
## 3. Create Chat History Analyzer Base [pending]

### Dependencies: None

### Description: Create the base structure for analyzing Cursor's chat history

### Details:
In utils/chat-history-analyzer.js create:
- ChatHistoryAnalyzer class
- readHistoryDir(): Lists all history directories
- readEntriesJson(): Parses entries.json files
- parseHistoryEntry(): Extracts relevant data from .js files

## 4. Implement Chat History Extraction [pending]

### Dependencies: None

### Description: Add core functionality to extract relevant chat history

### Details:
In ChatHistoryAnalyzer add (see the skeleton after this list):
- extractChatHistory(startTime): Gets history since task start
- parseFileChanges(): Extracts code changes
- parseAIInteractions(): Extracts AI responses
- filterRelevantHistory(): Removes irrelevant entries
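A skeleton for these two subtasks might start as follows. Note the heavy caveat: Cursor's on-disk history format is undocumented, so the entries.json shape assumed here is a placeholder that must be verified against real files before any of this can work.

```js
// Skeleton for utils/chat-history-analyzer.js. The entries.json shape
// ({ resource, entries: [{ id, timestamp }] }) is an unverified assumption.
import fs from 'fs/promises';
import path from 'path';
import { getCursorHistoryDir } from './cursor-path-helper.js';

export class ChatHistoryAnalyzer {
  async readHistoryDir() {
    // Each history directory is assumed to hold one edited file's snapshots
    const root = getCursorHistoryDir();
    const dirents = await fs.readdir(root, { withFileTypes: true });
    return dirents.filter((d) => d.isDirectory()).map((d) => path.join(root, d.name));
  }

  async readEntriesJson(historyDir) {
    const raw = await fs.readFile(path.join(historyDir, 'entries.json'), 'utf8');
    return JSON.parse(raw);
  }

  async extractChatHistory(startTime) {
    const results = [];
    for (const dir of await this.readHistoryDir()) {
      try {
        const { resource, entries = [] } = await this.readEntriesJson(dir);
        const recent = entries.filter((e) => e.timestamp >= startTime);
        if (recent.length > 0) results.push({ resource, dir, entries: recent });
      } catch {
        // Skip directories without a parseable entries.json
      }
    }
    return results;
  }
}
```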
## 5. Create CursorRulesManager Base [pending]

### Dependencies: None

### Description: Set up the base structure for managing Cursor rules

### Details:
In rules/cursor-rules-manager.js create:
- CursorRulesManager class
- readTemplate(): Reads cursor_rules.mdc
- listRuleFiles(): Lists all .mdc files
- readRuleFile(): Reads specific rule file

## 6. Implement Template Validation [pending]

### Dependencies: None

### Description: Add validation logic for rule files against cursor_rules.mdc

### Details:
In CursorRulesManager add:
- validateRuleFormat(): Checks against template
- parseTemplateStructure(): Extracts template sections
- validateAgainstTemplate(): Validates content structure
- getRequiredSections(): Lists mandatory sections

## 7. Add Rule Categorization Logic [pending]

### Dependencies: None

### Description: Implement logic to categorize changes into rule files

### Details:
In CursorRulesManager add:
- categorizeChanges(): Maps changes to rule files
- detectRuleCategories(): Identifies relevant categories
- getRuleFileForPattern(): Maps patterns to files
- createNewRuleFile(): Initializes new rule files

## 8. Implement Pattern Analysis [pending]

### Dependencies: None

### Description: Create functions to analyze implementation patterns

### Details:
In ChatHistoryAnalyzer add:
- extractPatterns(): Finds success patterns
- extractCorrections(): Finds error corrections
- findSuccessfulPaths(): Tracks successful implementations
- analyzeDecisions(): Extracts key decisions

## 9. Create AI Prompt Builder [pending]

### Dependencies: None

### Description: Implement prompt construction for Claude

### Details:
In learn.js create:
- buildRuleUpdatePrompt(): Builds Claude prompt
- formatHistoryContext(): Formats chat history
- formatRuleContext(): Formats current rules
- buildInstructions(): Creates specific instructions

## 10. Implement Learn Command Core [pending]

### Dependencies: None

### Description: Create the main learn command implementation

### Details:
In commands/learn.js implement:
- learnCommand(): Main command function
- processRuleUpdates(): Handles rule updates
- generateSummary(): Creates learning summary
- handleErrors(): Manages error cases

## 11. Add Auto-trigger Support [pending]

### Dependencies: None

### Description: Implement automatic learning after task completion

### Details:
Update task-manager.js:
- Add autoLearnConfig handling
- Modify completeTask() to trigger learning
- Add learning status tracking
- Implement learning queue

## 12. Implement CLI Integration [pending]

### Dependencies: None

### Description: Add the learn command to the CLI

### Details:
Update index.js to:
- Register learn command
- Add command options
- Handle manual triggers
- Process command flags

## 13. Add Progress Logging [pending]

### Dependencies: None

### Description: Implement detailed progress logging

### Details:
Create utils/learn-logger.js with:
- logLearningProgress(): Tracks overall progress
- logRuleUpdates(): Tracks rule changes
- logErrors(): Handles error logging
- createSummary(): Generates final report

## 14. Implement Error Recovery [pending]

### Dependencies: None

### Description: Add robust error handling throughout the system

### Details:
Create utils/error-handler.js with:
- handleFileErrors(): Manages file system errors
- handleParsingErrors(): Manages parsing failures
- handleAIErrors(): Manages Claude API errors
- implementRecoveryStrategies(): Adds recovery logic

## 15. Add Performance Optimization [pending]

### Dependencies: None

### Description: Optimize performance for large histories

### Details:
Add to utils/performance-optimizer.js:
- implementCaching(): Adds result caching
- optimizeFileReading(): Improves file reading
- addProgressiveLoading(): Implements lazy loading
- addMemoryManagement(): Manages memory usage

4. Validation:
   - After generating rules, use them in Cursor to verify they correctly guide future implementations
   - Have multiple team members test the command to ensure consistent results
156
tasks/task_034.txt
Normal file
@@ -0,0 +1,156 @@
|
||||
# Task ID: 34
|
||||
# Title: Implement updateTask Command for Single Task Updates
|
||||
# Status: done
|
||||
# Dependencies: None
|
||||
# Priority: high
|
||||
# Description: Create a new command that allows updating a specific task by ID using AI-driven refinement while preserving completed subtasks and supporting all existing update command options.
|
||||
# Details:
|
||||
Implement a new command called 'updateTask' that focuses on updating a single task rather than all tasks from an ID onwards. The implementation should:
|
||||
|
||||
1. Accept a single task ID as a required parameter
|
||||
2. Use the same AI-driven approach as the existing update command to refine the task
|
||||
3. Preserve the completion status of any subtasks that were previously marked as complete
|
||||
4. Support all options from the existing update command including:
|
||||
- The research flag for Perplexity integration
|
||||
- Any formatting or refinement options
|
||||
- Task context options
|
||||
5. Update the CLI help documentation to include this new command
|
||||
6. Ensure the command follows the same pattern as other commands in the codebase
|
||||
7. Add appropriate error handling for cases where the specified task ID doesn't exist
|
||||
8. Implement the ability to update task title, description, and details separately if needed
|
||||
9. Ensure the command returns appropriate success/failure messages
|
||||
10. Optimize the implementation to only process the single task rather than scanning through all tasks
|
||||
|
||||
The command should reuse existing AI prompt templates where possible but modify them to focus on refining a single task rather than multiple tasks.
|
||||
|
||||
# Test Strategy:
|
||||
Testing should verify the following aspects:
|
||||
|
||||
1. **Basic Functionality Test**: Verify that the command successfully updates a single task when given a valid task ID
|
||||
2. **Preservation Test**: Create a task with completed subtasks, update it, and verify the completion status remains intact
|
||||
3. **Research Flag Test**: Test the command with the research flag and verify it correctly integrates with Perplexity
|
||||
4. **Error Handling Tests**:
|
||||
- Test with non-existent task ID and verify appropriate error message
|
||||
- Test with invalid parameters and verify helpful error messages
|
||||
5. **Integration Test**: Run a complete workflow that creates a task, updates it with updateTask, and then verifies the changes are persisted
|
||||
6. **Comparison Test**: Compare the results of updating a single task with updateTask versus using the original update command on the same task to ensure consistent quality
|
||||
7. **Performance Test**: Measure execution time compared to the full update command to verify efficiency gains
|
||||
8. **CLI Help Test**: Verify the command appears correctly in help documentation with appropriate descriptions
|
||||
|
||||
Create unit tests for the core functionality and integration tests for the complete workflow. Document any edge cases discovered during testing.
|
||||
|
||||
# Subtasks:
|
||||
## 1. Create updateTaskById function in task-manager.js [done]
|
||||
### Dependencies: None
|
||||
### Description: Implement a new function in task-manager.js that focuses on updating a single task by ID using AI-driven refinement while preserving completed subtasks.
|
||||
### Details:
|
||||
Implementation steps:
|
||||
1. Create a new `updateTaskById` function in task-manager.js that accepts parameters: taskId, options object (containing research flag, formatting options, etc.)
|
||||
2. Implement logic to find a specific task by ID in the tasks array
|
||||
3. Add appropriate error handling for cases where the task ID doesn't exist (throw a custom error)
|
||||
4. Reuse existing AI prompt templates but modify them to focus on refining a single task
|
||||
5. Implement logic to preserve completion status of subtasks that were previously marked as complete
|
||||
6. Add support for updating task title, description, and details separately based on options
|
||||
7. Optimize the implementation to only process the single task rather than scanning through all tasks
|
||||
8. Return the updated task and appropriate success/failure messages
|
||||
|
||||
Testing approach:
|
||||
- Unit test the function with various scenarios including:
|
||||
- Valid task ID with different update options
|
||||
- Non-existent task ID
|
||||
- Task with completed subtasks to verify preservation
|
||||
- Different combinations of update options
|
||||
|
||||
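An illustrative sketch of the single-task update flow. The helper names readJSON, writeJSON, and refineTaskWithAI are placeholders standing in for the project's actual utilities and AI prompt plumbing, not confirmed APIs:

```js
// Sketch of updateTaskById; helper names are assumed placeholders.
import { readJSON, writeJSON } from './utils.js';
import { refineTaskWithAI } from './ai-services.js'; // placeholder name

export async function updateTaskById(tasksPath, taskId, options = {}) {
  const data = readJSON(tasksPath);
  const task = data.tasks.find((t) => t.id === taskId); // touch only this task
  if (!task) {
    throw new Error(`Task ${taskId} not found`);
  }

  // Remember which subtasks were already done before the AI rewrite
  const doneSubtaskIds = new Set(
    (task.subtasks ?? []).filter((s) => s.status === 'done').map((s) => s.id)
  );

  // refineTaskWithAI would build a single-task prompt and call Claude
  // (or Perplexity when options.research is set)
  const refined = await refineTaskWithAI(task, options);

  // Preserve completion status of previously finished subtasks
  refined.subtasks = (refined.subtasks ?? []).map((s) =>
    doneSubtaskIds.has(s.id) ? { ...s, status: 'done' } : s
  );

  Object.assign(task, refined);
  writeJSON(tasksPath, data);
  return task;
}
```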
## 2. Implement updateTask command in commands.js [done]

### Dependencies: 34.1

### Description: Create a new command called 'updateTask' in commands.js that leverages the updateTaskById function to update a specific task by ID.

### Details:
Implementation steps (a registration sketch follows below):
1. Create a new command object for 'updateTask' in commands.js following the Command pattern
2. Define command parameters including a required taskId parameter
3. Support all options from the existing update command:
   - Research flag for Perplexity integration
   - Formatting and refinement options
   - Task context options
4. Implement the command handler function that calls the updateTaskById function from task-manager.js
5. Add appropriate error handling to catch and display user-friendly error messages
6. Ensure the command follows the same pattern as other commands in the codebase
7. Implement proper validation of input parameters
8. Format and return appropriate success/failure messages to the user

Testing approach:
- Unit test the command handler with various input combinations
- Test error handling scenarios
- Verify command options are correctly passed to the updateTaskById function
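A sketch of the registration, following the Commander.js pattern the project's other commands use. The option names mirror the task text and may differ from the final CLI surface:

```js
// Sketch of the updateTask registration in commands.js (Commander.js style)
import { program } from 'commander';
import { updateTaskById } from './task-manager.js';

program
  .command('update-task')
  .description('Update a single task by ID with AI-driven refinement')
  .requiredOption('-i, --id <id>', 'Task ID to update')
  .option('-p, --prompt <text>', 'Context for the update')
  .option('-r, --research', 'Use Perplexity-backed research for the refinement')
  .option('-f, --file <path>', 'Path to tasks.json', 'tasks/tasks.json')
  .action(async (opts) => {
    try {
      const task = await updateTaskById(opts.file, parseInt(opts.id, 10), {
        prompt: opts.prompt,
        research: Boolean(opts.research)
      });
      console.log(`Updated task ${task.id}: ${task.title}`);
    } catch (error) {
      // Surface a friendly message and a non-zero exit code on failure
      console.error(`Error: ${error.message}`);
      process.exit(1);
    }
  });
```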
## 3. Add comprehensive error handling and validation [done]

### Dependencies: 34.1, 34.2

### Description: Implement robust error handling and validation for the updateTask command to ensure proper user feedback and system stability.

### Details:
Implementation steps:
1. Create custom error types for different failure scenarios (TaskNotFoundError, ValidationError, etc.)
2. Implement input validation for the taskId parameter and all options
3. Add proper error handling for AI service failures with appropriate fallback mechanisms
4. Implement concurrency handling to prevent conflicts when multiple updates occur simultaneously
5. Add comprehensive logging for debugging and auditing purposes
6. Ensure all error messages are user-friendly and actionable
7. Implement proper HTTP status codes for API responses if applicable
8. Add validation to ensure the task exists before attempting updates

Testing approach:
- Test various error scenarios including invalid inputs, non-existent tasks, and API failures
- Verify error messages are clear and helpful
- Test concurrency scenarios with multiple simultaneous updates
- Verify logging captures appropriate information for troubleshooting

## 4. Write comprehensive tests for updateTask command [done]

### Dependencies: 34.1, 34.2, 34.3

### Description: Create a comprehensive test suite for the updateTask command to ensure it works correctly in all scenarios and maintains backward compatibility.

### Details:
Implementation steps:
1. Create unit tests for the updateTaskById function in task-manager.js
   - Test finding and updating tasks with various IDs
   - Test preservation of completed subtasks
   - Test different update options combinations
   - Test error handling for non-existent tasks
2. Create unit tests for the updateTask command in commands.js
   - Test command parameter parsing
   - Test option handling
   - Test error scenarios and messages
3. Create integration tests that verify the end-to-end flow
   - Test the command with actual AI service integration
   - Test with mock AI responses for predictable testing
4. Implement test fixtures and mocks for consistent testing
5. Add performance tests to ensure the command is efficient
6. Test edge cases such as empty tasks, tasks with many subtasks, etc.

Testing approach:
- Use Jest or similar testing framework
- Implement mocks for external dependencies like AI services
- Create test fixtures for consistent test data
- Use snapshot testing for command output verification

## 5. Update CLI documentation and help text [done]

### Dependencies: 34.2

### Description: Update the CLI help documentation to include the new updateTask command and ensure users understand its purpose and options.

### Details:
Implementation steps:
1. Add comprehensive help text for the updateTask command including:
   - Command description
   - Required and optional parameters
   - Examples of usage
   - Description of all supported options
2. Update the main CLI help documentation to include the new command
3. Add the command to any relevant command groups or categories
4. Create usage examples that demonstrate common scenarios
5. Update README.md and other documentation files to include information about the new command
6. Add inline code comments explaining the implementation details
7. Update any API documentation if applicable
8. Create or update user guides with the new functionality

Testing approach:
- Verify help text is displayed correctly when running `--help`
- Review documentation for clarity and completeness
- Have team members review the documentation for usability
- Test examples to ensure they work as documented
48
tasks/task_035.txt
Normal file
@@ -0,0 +1,48 @@
|
||||
# Task ID: 35
|
||||
# Title: Integrate Grok3 API for Research Capabilities
|
||||
# Status: pending
|
||||
# Dependencies: None
|
||||
# Priority: medium
|
||||
# Description: Replace the current Perplexity API integration with Grok3 API for all research-related functionalities while maintaining existing feature parity.
|
||||
# Details:
|
||||
This task involves migrating from Perplexity to Grok3 API for research capabilities throughout the application. Implementation steps include:
|
||||
|
||||
1. Create a new API client module for Grok3 in `src/api/grok3.ts` that handles authentication, request formatting, and response parsing
|
||||
2. Update the research service layer to use the new Grok3 client instead of Perplexity
|
||||
3. Modify the request payload structure to match Grok3's expected format (parameters like temperature, max_tokens, etc.)
|
||||
4. Update response handling to properly parse and extract Grok3's response format
|
||||
5. Implement proper error handling for Grok3-specific error codes and messages
|
||||
6. Update environment variables and configuration files to include Grok3 API keys and endpoints
|
||||
7. Ensure rate limiting and quota management are properly implemented according to Grok3's specifications
|
||||
8. Update any UI components that display research provider information to show Grok3 instead of Perplexity
|
||||
9. Maintain backward compatibility for any stored research results from Perplexity
|
||||
10. Document the new API integration in the developer documentation
|
||||
|
||||
Grok3 API has different parameter requirements and response formats compared to Perplexity, so careful attention must be paid to these differences during implementation.
|
||||
|
||||
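A hypothetical shape for the client module (shown as plain JS; the task names `src/api/grok3.ts`). The endpoint URL, model name, payload fields, and response shape are placeholders to be replaced with the values from xAI's official API documentation:

```js
// Hypothetical Grok3 client sketch; endpoint, model, and payload shape are
// placeholders, not confirmed API details.
const GROK3_API_URL =
  process.env.GROK3_API_URL ?? 'https://api.x.ai/v1/chat/completions';

export async function grok3Research(query, { temperature = 0.2, maxTokens = 1024 } = {}) {
  const response = await fetch(GROK3_API_URL, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${process.env.GROK3_API_KEY}`
    },
    body: JSON.stringify({
      model: 'grok-3', // placeholder model name
      messages: [{ role: 'user', content: query }],
      temperature,
      max_tokens: maxTokens
    })
  });

  if (!response.ok) {
    // Map provider-specific errors into one application-level error type
    throw new Error(`Grok3 request failed: ${response.status} ${await response.text()}`);
  }

  const data = await response.json();
  return data.choices?.[0]?.message?.content ?? '';
}
```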
# Test Strategy:
Testing should verify that the Grok3 API integration works correctly and maintains feature parity with the previous Perplexity implementation:

1. Unit tests:
   - Test the Grok3 API client with mocked responses
   - Verify proper error handling for various error scenarios (rate limits, authentication failures, etc.)
   - Test the transformation of application requests to Grok3-compatible format

2. Integration tests:
   - Perform actual API calls to Grok3 with test credentials
   - Verify that research results are correctly parsed and returned
   - Test with various types of research queries to ensure broad compatibility

3. End-to-end tests:
   - Test the complete research flow from UI input to displayed results
   - Verify that all existing research features work with the new API

4. Performance tests:
   - Compare response times between Perplexity and Grok3
   - Ensure the application handles any differences in response time appropriately

5. Regression tests:
   - Verify that existing features dependent on research capabilities continue to work
   - Test that stored research results from Perplexity are still accessible and displayed correctly

Create a test environment with both APIs available to compare results and ensure quality before fully replacing Perplexity with Grok3.
48
tasks/task_036.txt
Normal file
@@ -0,0 +1,48 @@
|
||||
# Task ID: 36
|
||||
# Title: Add Ollama Support for AI Services as Claude Alternative
|
||||
# Status: pending
|
||||
# Dependencies: None
|
||||
# Priority: medium
|
||||
# Description: Implement Ollama integration as an alternative to Claude for all main AI services, allowing users to run local language models instead of relying on cloud-based Claude API.
|
||||
# Details:
|
||||
This task involves creating a comprehensive Ollama integration that can replace Claude across all main AI services in the application. Implementation should include:
|
||||
|
||||
1. Create an OllamaService class that implements the same interface as the ClaudeService to ensure compatibility
|
||||
2. Add configuration options to specify Ollama endpoint URL (default: http://localhost:11434)
|
||||
3. Implement model selection functionality to allow users to choose which Ollama model to use (e.g., llama3, mistral, etc.)
|
||||
4. Handle prompt formatting specific to Ollama models, ensuring proper system/user message separation
|
||||
5. Implement proper error handling for cases where Ollama server is unavailable or returns errors
|
||||
6. Add fallback mechanism to Claude when Ollama fails or isn't configured
|
||||
7. Update the AI service factory to conditionally create either Claude or Ollama service based on configuration
|
||||
8. Ensure token counting and rate limiting are appropriately handled for Ollama models
|
||||
9. Add documentation for users explaining how to set up and use Ollama with the application
|
||||
10. Optimize prompt templates specifically for Ollama models if needed
|
||||
|
||||
The implementation should be toggled through a configuration option (useOllama: true/false) and should maintain all existing functionality currently provided by Claude.
|
||||
|
||||
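A minimal sketch of such a service against Ollama's local HTTP chat endpoint (POST /api/chat). The complete() interface shape and the fallback hook are assumptions matching the description above, not the project's confirmed service contract:

```js
// Minimal OllamaService sketch; the interface shape is an assumption.
export class OllamaService {
  constructor({ baseUrl = 'http://localhost:11434', model = 'llama3' } = {}) {
    this.baseUrl = baseUrl;
    this.model = model;
  }

  async complete(systemPrompt, userPrompt) {
    const response = await fetch(`${this.baseUrl}/api/chat`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        model: this.model,
        stream: false, // request one complete response instead of a stream
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: userPrompt }
        ]
      })
    });
    if (!response.ok) {
      throw new Error(`Ollama server error: ${response.status}`);
    }
    const data = await response.json();
    return data.message?.content ?? '';
  }
}

// One possible factory-level fallback when Ollama is unreachable:
export async function completeWithFallback(ollama, claude, system, user) {
  try {
    return await ollama.complete(system, user);
  } catch {
    return claude.complete(system, user); // assumes a matching ClaudeService API
  }
}
```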
# Test Strategy:
Testing should verify that Ollama integration works correctly as a drop-in replacement for Claude:

1. Unit tests:
   - Test OllamaService class methods in isolation with mocked responses
   - Verify proper error handling when Ollama server is unavailable
   - Test fallback mechanism to Claude when configured

2. Integration tests:
   - Test with actual Ollama server running locally with at least two different models
   - Verify all AI service functions work correctly with Ollama
   - Compare outputs between Claude and Ollama for quality assessment

3. Configuration tests:
   - Verify toggling between Claude and Ollama works as expected
   - Test with various model configurations

4. Performance tests:
   - Measure and compare response times between Claude and Ollama
   - Test with different load scenarios

5. Manual testing:
   - Verify all main AI features work correctly with Ollama
   - Test edge cases like very long inputs or specialized tasks

Create a test document comparing output quality between Claude and various Ollama models to help users understand the tradeoffs.
49
tasks/task_037.txt
Normal file
@@ -0,0 +1,49 @@
|
||||
# Task ID: 37
|
||||
# Title: Add Gemini Support for Main AI Services as Claude Alternative
|
||||
# Status: pending
|
||||
# Dependencies: None
|
||||
# Priority: medium
|
||||
# Description: Implement Google's Gemini API integration as an alternative to Claude for all main AI services, allowing users to switch between different LLM providers.
|
||||
# Details:
|
||||
This task involves integrating Google's Gemini API across all main AI services that currently use Claude:
|
||||
|
||||
1. Create a new GeminiService class that implements the same interface as the existing ClaudeService
|
||||
2. Implement authentication and API key management for Gemini API
|
||||
3. Map our internal prompt formats to Gemini's expected input format
|
||||
4. Handle Gemini-specific parameters (temperature, top_p, etc.) and response parsing
|
||||
5. Update the AI service factory/provider to support selecting Gemini as an alternative
|
||||
6. Add configuration options in settings to allow users to select Gemini as their preferred provider
|
||||
7. Implement proper error handling for Gemini-specific API errors
|
||||
8. Ensure streaming responses are properly supported if Gemini offers this capability
|
||||
9. Update documentation to reflect the new Gemini option
|
||||
10. Consider implementing model selection if Gemini offers multiple models (e.g., Gemini Pro, Gemini Ultra)
|
||||
11. Ensure all existing AI capabilities (summarization, code generation, etc.) maintain feature parity when using Gemini
|
||||
|
||||
The implementation should follow the same pattern as the recent Ollama integration (Task #36) to maintain consistency in how alternative AI providers are supported.
|
||||
|
||||
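A sketch of the factory extended with a Gemini branch, mirroring the Ollama toggle from Task #36. The service class names come from these tasks, but their constructors and module paths are assumptions; GeminiService is assumed to implement the same complete() interface as the others:

```js
// Sketch of an AI service factory with provider selection; module paths
// and constructor options are assumptions.
import { ClaudeService } from './claude-service.js';
import { OllamaService } from './ollama-service.js';
import { GeminiService } from './gemini-service.js';

export function createAIService(config) {
  switch (config.provider) { // e.g. 'claude' | 'ollama' | 'gemini'
    case 'ollama':
      return new OllamaService(config.ollama);
    case 'gemini':
      return new GeminiService({
        apiKey: process.env.GEMINI_API_KEY,
        model: config.gemini?.model ?? 'gemini-pro' // placeholder default
      });
    case 'claude':
    default:
      return new ClaudeService({ apiKey: process.env.ANTHROPIC_API_KEY });
  }
}
```

Keeping every provider behind one complete() interface means the rest of the application never branches on the provider name, which is what makes the toggle cheap to maintain.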
# Test Strategy:
Testing should verify Gemini integration works correctly across all AI services:

1. Unit tests:
   - Test GeminiService class methods with mocked API responses
   - Verify proper error handling for common API errors
   - Test configuration and model selection functionality

2. Integration tests:
   - Verify authentication and API connection with valid credentials
   - Test each AI service with Gemini to ensure proper functionality
   - Compare outputs between Claude and Gemini for the same inputs to verify quality

3. End-to-end tests:
   - Test the complete user flow of switching to Gemini and using various AI features
   - Verify streaming responses work correctly if supported

4. Performance tests:
   - Measure and compare response times between Claude and Gemini
   - Test with various input lengths to verify handling of context limits

5. Manual testing:
   - Verify the quality of Gemini responses across different use cases
   - Test edge cases like very long inputs or specialized domain knowledge

All tests should pass with Gemini selected as the provider, and the user experience should be consistent regardless of which provider is selected.
56
tasks/task_038.txt
Normal file
@@ -0,0 +1,56 @@
|
||||
# Task ID: 38
|
||||
# Title: Implement Version Check System with Upgrade Notifications
|
||||
# Status: done
|
||||
# Dependencies: None
|
||||
# Priority: high
|
||||
# Description: Create a system that checks for newer package versions and displays upgrade notifications when users run any command, informing them to update to the latest version.
|
||||
# Details:
|
||||
Implement a version check mechanism that runs automatically with every command execution:
|
||||
|
||||
1. Create a new module (e.g., `versionChecker.js`) that will:
|
||||
- Fetch the latest version from npm registry using the npm registry API (https://registry.npmjs.org/task-master-ai/latest)
|
||||
- Compare it with the current installed version (from package.json)
|
||||
- Store the last check timestamp to avoid excessive API calls (check once per day)
|
||||
- Cache the result to minimize network requests
|
||||
|
||||
2. The notification should:
|
||||
- Use colored text (e.g., yellow background with black text) to be noticeable
|
||||
- Include the current version and latest version
|
||||
- Show the exact upgrade command: 'npm i task-master-ai@latest'
|
||||
- Be displayed at the beginning or end of command output, not interrupting the main content
|
||||
- Include a small separator line to distinguish it from command output
|
||||
|
||||
3. Implementation considerations:
|
||||
- Handle network failures gracefully (don't block command execution if version check fails)
|
||||
- Add a configuration option to disable update checks if needed
|
||||
- Ensure the check is lightweight and doesn't significantly impact command performance
|
||||
- Consider using a package like 'semver' for proper version comparison
|
||||
- Implement a cooldown period (e.g., only check once per day) to avoid excessive API calls
|
||||
|
||||
4. The version check should be integrated into the main command execution flow so it runs for all commands automatically.
|
||||
|
||||
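A sketch of the checker following this outline. The registry URL and upgrade command come from the task text; the cache file location and the notice-formatting helper are implementation choices, not project APIs:

```js
// Sketch of versionChecker.js; cache path and helper names are choices.
import fs from 'fs';
import path from 'path';
import os from 'os';
import semver from 'semver';

const REGISTRY_URL = 'https://registry.npmjs.org/task-master-ai/latest';
const CACHE_FILE = path.join(os.homedir(), '.task-master-version-check.json');
const ONE_DAY_MS = 24 * 60 * 60 * 1000;

export async function checkForUpdate(currentVersion) {
  try {
    // Cooldown: skip the network call if we checked within the last day
    if (fs.existsSync(CACHE_FILE)) {
      const cache = JSON.parse(fs.readFileSync(CACHE_FILE, 'utf8'));
      if (Date.now() - cache.checkedAt < ONE_DAY_MS) {
        return maybeNotice(currentVersion, cache.latest);
      }
    }
    const res = await fetch(REGISTRY_URL);
    if (!res.ok) return null;
    const { version: latest } = await res.json();
    fs.writeFileSync(CACHE_FILE, JSON.stringify({ latest, checkedAt: Date.now() }));
    return maybeNotice(currentVersion, latest);
  } catch {
    return null; // never block the command on a failed version check
  }
}

function maybeNotice(current, latest) {
  if (!latest || !semver.gt(latest, current)) return null;
  return [
    '────────────────────────────────────────────',
    `Update available: ${current} → ${latest}`,
    'Run: npm i task-master-ai@latest',
    '────────────────────────────────────────────'
  ].join('\n');
}
```

The caller prints the returned notice (if any) before or after its normal output, so a registry failure or an up-to-date install simply yields null and costs nothing.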
# Test Strategy:
1. Manual testing:
   - Install an older version of the package
   - Run various commands and verify the update notification appears
   - Update to the latest version and confirm the notification no longer appears
   - Test with network disconnected to ensure graceful handling of failures

2. Unit tests:
   - Mock the npm registry response to test different scenarios:
     - When a newer version exists
     - When using the latest version
     - When the registry is unavailable
   - Test the version comparison logic with various version strings
   - Test the cooldown/caching mechanism works correctly

3. Integration tests:
   - Create a test that runs a command and verifies the notification appears in the expected format
   - Test that the notification appears for all commands
   - Verify the notification doesn't interfere with normal command output

4. Edge cases to test:
   - Pre-release versions (alpha/beta)
   - Very old versions
   - When package.json is missing or malformed
   - When npm registry returns unexpected data
128
tasks/task_039.txt
Normal file
@@ -0,0 +1,128 @@
|
||||
# Task ID: 39
|
||||
# Title: Update Project Licensing to Dual License Structure
|
||||
# Status: done
|
||||
# Dependencies: None
|
||||
# Priority: high
|
||||
# Description: Replace the current MIT license with a dual license structure that protects commercial rights for project owners while allowing non-commercial use under an open source license.
|
||||
# Details:
|
||||
This task requires implementing a comprehensive licensing update across the project:
|
||||
|
||||
1. Remove all instances of the MIT license from the codebase, including any MIT license files, headers in source files, and references in documentation.
|
||||
|
||||
2. Create a dual license structure with:
|
||||
- Business Source License (BSL) 1.1 or similar for commercial use, explicitly stating that commercial rights are exclusively reserved for Ralph & Eyal
|
||||
- Apache 2.0 for non-commercial use, allowing the community to use, modify, and distribute the code for non-commercial purposes
|
||||
|
||||
3. Update the license field in package.json to reflect the dual license structure (e.g., "BSL 1.1 / Apache 2.0")
|
||||
|
||||
4. Add a clear, concise explanation of the licensing terms in the README.md, including:
|
||||
- A summary of what users can and cannot do with the code
|
||||
- Who holds commercial rights
|
||||
- How to obtain commercial use permission if needed
|
||||
- Links to the full license texts
|
||||
|
||||
5. Create a detailed LICENSE.md file that includes:
|
||||
- Full text of both licenses
|
||||
- Clear delineation between commercial and non-commercial use
|
||||
- Specific definitions of what constitutes commercial use
|
||||
- Any additional terms or clarifications specific to this project
|
||||
|
||||
6. Create a CONTRIBUTING.md file that explicitly states:
|
||||
- Contributors must agree that their contributions will be subject to the project's dual licensing
|
||||
- Commercial rights for all contributions are assigned to Ralph & Eyal
|
||||
- Guidelines for acceptable contributions
|
||||
|
||||
7. Ensure all source code files include appropriate license headers that reference the dual license structure.
|
||||
|
||||
# Test Strategy:
|
||||
To verify correct implementation, perform the following checks:
|
||||
|
||||
1. File verification:
|
||||
- Confirm the MIT license file has been removed
|
||||
- Verify LICENSE.md exists and contains both BSL and Apache 2.0 license texts
|
||||
- Confirm README.md includes the license section with clear explanation
|
||||
- Verify CONTRIBUTING.md exists with proper contributor guidelines
|
||||
- Check package.json for updated license field
|
||||
|
||||
2. Content verification:
|
||||
- Review LICENSE.md to ensure it properly describes the dual license structure with clear terms
|
||||
- Verify README.md license section is concise yet complete
|
||||
- Check that commercial rights are explicitly reserved for Ralph & Eyal in all relevant documents
|
||||
- Ensure CONTRIBUTING.md clearly explains the licensing implications for contributors
|
||||
|
||||
3. Legal review:
|
||||
- Have a team member not involved in the implementation review all license documents
|
||||
- Verify that the chosen BSL terms properly protect commercial interests
|
||||
- Confirm the Apache 2.0 implementation is correct and compatible with the BSL portions
|
||||
|
||||
4. Source code check:
|
||||
- Sample at least 10 source files to ensure they have updated license headers
|
||||
- Verify no MIT license references remain in any source files
|
||||
|
||||
5. Documentation check:
|
||||
- Ensure any documentation that mentioned licensing has been updated to reflect the new structure
|
||||
|
||||
# Subtasks:
|
||||
## 1. Remove MIT License and Create Dual License Files [done]
|
||||
### Dependencies: None
|
||||
### Description: Remove all MIT license references from the codebase and create the new license files for the dual license structure.
|
||||
### Details:
|
||||
Implementation steps:
|
||||
1. Scan the entire codebase to identify all instances of MIT license references (license files, headers in source files, documentation mentions).
|
||||
2. Remove the MIT license file and all direct references to it.
|
||||
3. Create a LICENSE.md file containing:
|
||||
- Full text of Business Source License (BSL) 1.1 with explicit commercial rights reservation for Ralph & Eyal
|
||||
- Full text of Apache 2.0 license for non-commercial use
|
||||
- Clear definitions of what constitutes commercial vs. non-commercial use
|
||||
- Specific terms for obtaining commercial use permission
|
||||
4. Create a CONTRIBUTING.md file that explicitly states the contribution terms:
|
||||
- Contributors must agree to the dual licensing structure
|
||||
- Commercial rights for all contributions are assigned to Ralph & Eyal
|
||||
- Guidelines for acceptable contributions
|
||||
|
||||
Testing approach:
|
||||
- Verify all MIT license references have been removed using a grep or similar search tool
|
||||
- Have legal review of the LICENSE.md and CONTRIBUTING.md files to ensure they properly protect commercial rights
|
||||
- Validate that the license files are properly formatted and readable
|
||||
|
||||
## 2. Update Source Code License Headers and Package Metadata [done]
|
||||
### Dependencies: 39.1
|
||||
### Description: Add appropriate dual license headers to all source code files and update package metadata to reflect the new licensing structure.
|
||||
### Details:
|
||||
Implementation steps:
|
||||
1. Create a template for the new license header that references the dual license structure (BSL 1.1 / Apache 2.0).
|
||||
2. Systematically update all source code files to include the new license header, replacing any existing MIT headers.
|
||||
3. Update the license field in package.json to "BSL 1.1 / Apache 2.0".
|
||||
4. Update any other metadata files (composer.json, setup.py, etc.) that contain license information.
|
||||
5. Verify that any build scripts or tools that reference licensing information are updated.
|
||||
|
||||
Testing approach:
|
||||
- Write a script to verify that all source files contain the new license header
|
||||
- Validate package.json and other metadata files have the correct license field
|
||||
- Ensure any build processes that depend on license information still function correctly
|
||||
- Run a sample build to confirm license information is properly included in any generated artifacts
|
||||
|
||||
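One possible shape for that verification script: scan source files and list any missing a header marker. The marker string below is a placeholder for whatever the final header actually contains:

```js
// Sketch of a license-header check; HEADER_MARKER is a placeholder string.
import fs from 'fs';
import path from 'path';

const HEADER_MARKER = 'BSL 1.1 / Apache 2.0'; // placeholder marker text

function findJsFiles(dir, out = []) {
  for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
    const full = path.join(dir, entry.name);
    if (entry.isDirectory() && entry.name !== 'node_modules') findJsFiles(full, out);
    else if (entry.isFile() && full.endsWith('.js')) out.push(full);
  }
  return out;
}

const missing = findJsFiles(process.cwd()).filter(
  (file) => !fs.readFileSync(file, 'utf8').includes(HEADER_MARKER)
);

if (missing.length > 0) {
  console.error(`Missing license header in ${missing.length} file(s):`);
  missing.forEach((f) => console.error(`  ${f}`));
  process.exit(1); // fail CI when any file lacks the header
}
console.log('All source files carry the dual-license header.');
```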
## 3. Update Documentation and Create License Explanation [done]

### Dependencies: 39.1, 39.2

### Description: Update project documentation to clearly explain the dual license structure and create comprehensive licensing guidance.

### Details:
Implementation steps:
1. Update the README.md with a clear, concise explanation of the licensing terms:
   - Summary of what users can and cannot do with the code
   - Who holds commercial rights (Ralph & Eyal)
   - How to obtain commercial use permission
   - Links to the full license texts
2. Create a dedicated LICENSING.md or similar document with detailed explanations of:
   - The rationale behind the dual licensing approach
   - Detailed examples of what constitutes commercial vs. non-commercial use
   - FAQs addressing common licensing questions
3. Update any other documentation references to licensing throughout the project.
4. Create visual aids (if appropriate) to help users understand the licensing structure.
5. Ensure all documentation links to licensing information are updated.

Testing approach:
- Have non-technical stakeholders review the documentation for clarity and understanding
- Verify all links to license files work correctly
- Ensure the explanation is comprehensive but concise enough for users to understand quickly
- Check that the documentation correctly addresses the most common use cases and questions
102
tasks/task_040.txt
Normal file
@@ -0,0 +1,102 @@
|
||||
# Task ID: 40
|
||||
# Title: Implement Project Funding Documentation and Support Infrastructure
|
||||
# Status: in-progress
|
||||
# Dependencies: None
|
||||
# Priority: medium
|
||||
# Description: Create FUNDING.yml for GitHub Sponsors integration that outlines all financial support options for the Task Master project.
|
||||
# Details:
|
||||
This task involves creating a FUNDING.yml file to enable and manage funding options for the Task Master project:
|
||||
|
||||
**FUNDING.yml file**:
|
||||
- Create a .github/FUNDING.yml file following GitHub's specifications
|
||||
- Include configuration for multiple funding platforms:
|
||||
- GitHub Sponsors (primary if available)
|
||||
- Open Collective
|
||||
- Patreon
|
||||
- Ko-fi
|
||||
- Liberapay
|
||||
- Custom funding URLs (project website donation page)
|
||||
- Research and reference successful implementation patterns from Vue.js, React, and TypeScript projects
|
||||
- Ensure the FUNDING.yml contains sufficient information to guide users on how to support the project
|
||||
- Include comments within the YAML file to provide context for each funding option
|
||||
|
||||
The implementation should maintain consistent branding and messaging with the rest of the Task Master project. Research at least 5 successful open source projects to identify best practices in funding configuration.
|
||||
|
||||
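Since the deliverable is the YAML file itself, here is an illustrative sample using GitHub's standard FUNDING.yml keys. Every account name and URL below is a placeholder to be replaced with the project's real handles:

```yaml
# Illustrative .github/FUNDING.yml; all account names are placeholders.
github: [your-github-username]          # GitHub Sponsors
open_collective: task-master            # Open Collective slug
patreon: your-patreon-name
ko_fi: your-kofi-name
liberapay: your-liberapay-name
custom: ["https://example.com/donate"]  # project website donation page
```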
# Test Strategy:
|
||||
Testing should verify the technical implementation of the FUNDING.yml file:
|
||||
|
||||
1. **FUNDING.yml validation**:
|
||||
- Verify the file is correctly placed in the .github directory
|
||||
- Validate YAML syntax using a linter
|
||||
- Test that GitHub correctly displays funding options on the repository page
|
||||
- Verify all links to external funding platforms are functional
|
||||
|
||||
2. **User experience testing**:
|
||||
- Test the complete funding workflow from a potential supporter's perspective
|
||||
- Verify the process is intuitive and barriers to contribution are minimized
|
||||
- Check that the Sponsor button appears correctly on GitHub
|
||||
- Ensure all funding platform links resolve to the correct destinations
|
||||
- Gather feedback from 2-3 potential users on clarity and ease of use
|
||||
|
||||
# Subtasks:
## 1. Research and Create FUNDING.yml File [done]
### Dependencies: None
### Description: Research successful funding configurations and create the .github/FUNDING.yml file for GitHub Sponsors integration and other funding platforms.
### Details:
Implementation steps:
1. Create the .github directory at the project root if it doesn't exist
2. Research funding configurations from 5 successful open source projects (Vue.js, React, TypeScript, etc.)
3. Document the patterns and approaches used in these projects
4. Create the FUNDING.yml file with the following platforms:
   - GitHub Sponsors (primary)
   - Open Collective
   - Patreon
   - Ko-fi
   - Liberapay
   - Custom donation URL for the project website
5. Validate the YAML syntax using a linter
6. Test the file by pushing to a test branch and verifying the Sponsor button appears correctly on GitHub

Testing approach:
- Validate YAML syntax using yamllint or a similar tool
- Test on GitHub by checking if the Sponsor button appears in the repository
- Verify each funding link resolves to the correct destination

## 4. Add Documentation Comments to FUNDING.yml [pending]
### Dependencies: 40.1
### Description: Add comprehensive comments within the FUNDING.yml file to provide context and guidance for each funding option.
### Details:
Implementation steps:
1. Add a header comment explaining the purpose of the file
2. For each funding platform entry, add comments that explain:
   - What the platform is
   - How funds are processed on this platform
   - Any specific benefits of using this platform
   - Brief instructions for potential sponsors
3. Include a comment about how sponsors will be acknowledged
4. Add information about fund allocation (maintenance, new features, infrastructure)
5. Ensure comments follow YAML comment syntax and don't break the file structure

Testing approach:
- Validate that the YAML file still passes linting with comments added
- Verify the file still functions correctly on GitHub
- Have at least one team member review the comments for clarity and completeness

## 5. Integrate Funding Information in Project README [pending]
### Dependencies: 40.1, 40.4
### Description: Add a section to the project README that highlights the funding options and directs users to the Sponsor button.
### Details:
Implementation steps:
1. Create a 'Support the Project' or 'Sponsorship' section in the README.md (a sketch follows this subtask)
2. Explain briefly why financial support matters for the project
3. Direct users to the GitHub Sponsor button
4. Mention the alternative funding platforms available
5. Include a brief note on how funds will be used
6. Add any relevant funding badges (e.g., Open Collective, GitHub Sponsors)

Testing approach:
- Review the README section for clarity and conciseness
- Verify all links work correctly
- Ensure the section is appropriately visible but doesn't overshadow project information
- Check that badges render correctly
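
As a rough illustration only (the badge URL and sponsor handle are placeholders, not the project's real accounts), such a section might read:

```markdown
## Support Task Master

If Task Master saves you time, consider supporting its development — funds go
toward maintenance, new features, and infrastructure.

[![Sponsor](https://img.shields.io/badge/sponsor-GitHub%20Sponsors-ea4aaa)](https://github.com/sponsors/your-github-handle)

Prefer another platform? Open Collective, Patreon, Ko-fi, and Liberapay links
live in [.github/FUNDING.yml](.github/FUNDING.yml).
```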
89
tasks/task_041.txt
Normal file
@@ -0,0 +1,89 @@
# Task ID: 41
# Title: Implement GitHub Actions CI Workflow for Task Master
# Status: pending
# Dependencies: None
# Priority: high
# Description: Create a streamlined CI workflow file (ci.yml) that efficiently tests the Task Master codebase using GitHub Actions.
# Details:
Create a GitHub Actions workflow file at `.github/workflows/ci.yml` with the following specifications (a sketch follows the list):

1. Configure the workflow to trigger on:
   - Push events to any branch
   - Pull request events targeting any branch

2. Core workflow configuration:
   - Use Ubuntu latest as the primary testing environment
   - Use Node.js 20.x (LTS) for consistency with the project
   - Focus on a single environment for speed and simplicity

3. Configure workflow steps to:
   - Checkout the repository using actions/checkout@v4
   - Set up Node.js using actions/setup-node@v4 with npm caching
   - Install dependencies with 'npm ci'
   - Run tests with 'npm run test:coverage'

4. Implement efficient caching:
   - Cache node_modules using actions/cache@v4
   - Use the package-lock.json hash for the cache key
   - Implement proper cache restoration keys

5. Ensure proper timeouts:
   - 2 minutes for dependency installation
   - Appropriate timeout for test execution

6. Artifact handling:
   - Upload test results and coverage reports
   - Use consistent naming for artifacts
   - Retain artifacts for 30 days
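
A minimal sketch of a workflow meeting these requirements is shown below. It relies on setup-node's built-in npm caching rather than a separate actions/cache step, and it assumes coverage output lands in `coverage/` (Jest's default) — adjust paths and artifact names to the real project layout:

```yaml
# .github/workflows/ci.yml
name: CI

on:
  push:           # any branch
  pull_request:   # any target branch

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: npm          # cache keyed on package-lock.json

      - name: Install dependencies
        run: npm ci
        timeout-minutes: 2

      - name: Run tests with coverage
        run: npm run test:coverage

      - name: Upload coverage report
        if: always()          # upload even when tests fail
        uses: actions/upload-artifact@v4
        with:
          name: coverage-report
          path: coverage/
          retention-days: 30
```

Caching node_modules directly with actions/cache@v4 keyed on `hashFiles('package-lock.json')`, as point 4 calls for, is the heavier alternative; caching the npm cache via setup-node is the simpler route and usually sufficient with `npm ci`.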

# Test Strategy:
To verify correct implementation of the GitHub Actions CI workflow:

1. Manual verification:
   - Check that the file is correctly placed at `.github/workflows/ci.yml`
   - Verify the YAML syntax is valid
   - Confirm all required configurations are present

2. Functional testing:
   - Push a commit to verify the workflow triggers
   - Create a PR to verify the workflow runs on pull requests
   - Verify test coverage reports are generated and uploaded
   - Confirm caching is working effectively

3. Performance testing:
   - Verify cache hits reduce installation time
   - Confirm workflow completes within expected timeframe
   - Check artifact upload and download speeds

# Subtasks:
## 1. Create Basic GitHub Actions Workflow [pending]
### Dependencies: None
### Description: Set up the foundational GitHub Actions workflow file with proper triggers and Node.js setup
### Details:
1. Create `.github/workflows/ci.yml`
2. Configure workflow name and triggers
3. Set up Ubuntu runner and Node.js 20.x
4. Implement checkout and Node.js setup actions
5. Configure npm caching
6. Test basic workflow functionality

## 2. Implement Test and Coverage Steps [pending]
### Dependencies: 41.1
### Description: Add test execution and coverage reporting to the workflow
### Details:
1. Add dependency installation with proper timeout
2. Configure test execution with coverage
3. Set up test results and coverage artifacts
4. Verify artifact upload functionality
5. Test the complete workflow

## 3. Optimize Workflow Performance [pending]
### Dependencies: 41.1, 41.2
### Description: Implement caching and performance optimizations
### Details:
1. Set up node_modules caching
2. Configure cache key strategy
3. Implement proper timeout values
4. Test caching effectiveness
5. Document performance improvements

623
tasks/tasks.json
@@ -12,12 +12,13 @@
"id": 1,
"title": "Implement Task Data Structure",
"description": "Design and implement the core tasks.json structure that will serve as the single source of truth for the system.",
"status": "done",
"status": "in-progress",
"dependencies": [],
"priority": "high",
"details": "Create the foundational data structure including:\n- JSON schema for tasks.json\n- Task model with all required fields (id, title, description, status, dependencies, priority, details, testStrategy, subtasks)\n- Validation functions for the task model\n- Basic file system operations for reading/writing tasks.json\n- Error handling for file operations",
"testStrategy": "Verify that the tasks.json structure can be created, read, and validated. Test with sample data to ensure all fields are properly handled and that validation correctly identifies invalid structures.",
"subtasks": []
"subtasks": [],
"previousStatus": "in-progress"
},
{
"id": 2,
@@ -1336,15 +1337,15 @@
},
{
"id": 23,
"title": "Implement MCP Server Functionality for Task Master using FastMCP",
"description": "Extend Task Master to function as an MCP server by leveraging FastMCP's JavaScript/TypeScript implementation for efficient context management services.",
"status": "pending",
"title": "Complete MCP Server Implementation for Task Master using FastMCP",
"description": "Finalize the MCP server functionality for Task Master by leveraging FastMCP's capabilities, transitioning from CLI-based execution to direct function imports, and optimizing performance, authentication, and context management. Ensure the server integrates seamlessly with Cursor via `mcp.json` and supports proper tool registration, efficient context handling, and transport type handling (focusing on stdio). Additionally, ensure the server can be instantiated properly when installed via `npx` or `npm i -g`. Evaluate and address gaps in the current implementation, including function imports, context management, caching, tool registration, and adherence to FastMCP best practices.",
"status": "in-progress",
"dependencies": [
22
],
"priority": "medium",
"details": "This task involves implementing the Model Context Protocol server capabilities within Task Master. The implementation should:\n\n1. Create a new module `mcp-server.js` that implements the core MCP server functionality\n2. Implement the required MCP endpoints:\n - `/context` - For retrieving and updating context\n - `/models` - For listing available models\n - `/execute` - For executing operations with context\n3. Develop a context management system that can:\n - Store and retrieve context data efficiently\n - Handle context windowing and truncation when limits are reached\n - Support context metadata and tagging\n4. Add authentication and authorization mechanisms for MCP clients\n5. Implement proper error handling and response formatting according to MCP specifications\n6. Create configuration options in Task Master to enable/disable the MCP server functionality\n7. Add documentation for how to use Task Master as an MCP server\n8. Ensure the implementation is compatible with existing MCP clients\n9. Optimize for performance, especially for context retrieval operations\n10. Add logging for MCP server operations\n\nThe implementation should follow RESTful API design principles and should be able to handle concurrent requests from multiple clients.",
"testStrategy": "Testing for the MCP server functionality should include:\n\n1. Unit tests:\n - Test each MCP endpoint handler function independently\n - Verify context storage and retrieval mechanisms\n - Test authentication and authorization logic\n - Validate error handling for various failure scenarios\n\n2. Integration tests:\n - Set up a test MCP server instance\n - Test complete request/response cycles for each endpoint\n - Verify context persistence across multiple requests\n - Test with various payload sizes and content types\n\n3. Compatibility tests:\n - Test with existing MCP client libraries\n - Verify compliance with the MCP specification\n - Ensure backward compatibility with any MCP versions supported\n\n4. Performance tests:\n - Measure response times for context operations with various context sizes\n - Test concurrent request handling\n - Verify memory usage remains within acceptable limits during extended operation\n\n5. Security tests:\n - Verify authentication mechanisms cannot be bypassed\n - Test for common API vulnerabilities (injection, CSRF, etc.)\n\nAll tests should be automated and included in the CI/CD pipeline. Documentation should include examples of how to test the MCP server functionality manually using tools like curl or Postman.",
"details": "This task involves completing the Model Context Protocol (MCP) server implementation for Task Master using FastMCP. Key updates include:\n\n1. Transition from CLI-based execution (currently using `child_process.spawnSync`) to direct Task Master function imports for improved performance and reliability.\n2. Implement caching mechanisms for frequently accessed contexts to enhance performance, leveraging FastMCP's efficient transport mechanisms (e.g., stdio).\n3. Refactor context management to align with best practices for handling large context windows, metadata, and tagging.\n4. Refactor tool registration in `tools/index.js` to include clear descriptions and parameter definitions, leveraging FastMCP's decorator-based patterns for better integration.\n5. Enhance transport type handling to ensure proper stdio communication and compatibility with FastMCP.\n6. Ensure the MCP server can be instantiated and run correctly when installed globally via `npx` or `npm i -g`.\n7. Integrate the ModelContextProtocol SDK directly to streamline resource and tool registration, ensuring compatibility with FastMCP's transport mechanisms.\n8. Identify and address missing components or functionalities to meet FastMCP best practices, such as robust error handling, monitoring endpoints, and concurrency support.\n9. Update documentation to include examples of using the MCP server with FastMCP, detailed setup instructions, and client integration guides.\n\nThe implementation must ensure compatibility with existing MCP clients and follow RESTful API design principles, while supporting concurrent requests and maintaining robust error handling.",
"testStrategy": "Testing for the MCP server implementation will follow a comprehensive approach based on our established testing guidelines:\n\n## Test Organization\n\n1. **Unit Tests** (`tests/unit/mcp-server/`):\n - Test individual MCP server components in isolation\n - Mock all external dependencies including FastMCP SDK\n - Test each tool implementation separately\n - Verify direct function imports work correctly\n - Test context management and caching mechanisms\n - Example files: `context-manager.test.js`, `tool-registration.test.js`, `direct-imports.test.js`\n\n2. **Integration Tests** (`tests/integration/mcp-server/`):\n - Test interactions between MCP server components\n - Verify proper tool registration with FastMCP\n - Test context flow between components\n - Validate error handling across module boundaries\n - Example files: `server-tool-integration.test.js`, `context-flow.test.js`\n\n3. **End-to-End Tests** (`tests/e2e/mcp-server/`):\n - Test complete MCP server workflows\n - Verify server instantiation via different methods (direct, npx, global install)\n - Test actual stdio communication with mock clients\n - Example files: `server-startup.e2e.test.js`, `client-communication.e2e.test.js`\n\n4. **Test Fixtures** (`tests/fixtures/mcp-server/`):\n - Sample context data\n - Mock tool definitions\n - Sample MCP requests and responses\n\n## Testing Approach\n\n### Module Mocking Strategy\n```javascript\n// Mock the FastMCP SDK\njest.mock('@model-context-protocol/sdk', () => ({\n MCPServer: jest.fn().mockImplementation(() => ({\n registerTool: jest.fn(),\n registerResource: jest.fn(),\n start: jest.fn().mockResolvedValue(undefined),\n stop: jest.fn().mockResolvedValue(undefined)\n })),\n MCPError: jest.fn().mockImplementation(function(message, code) {\n this.message = message;\n this.code = code;\n })\n}));\n\n// Import modules after mocks\nimport { MCPServer, MCPError } from '@model-context-protocol/sdk';\nimport { initMCPServer } from '../../scripts/mcp-server.js';\n```\n\n### Context Management Testing\n- Test context creation, retrieval, and manipulation\n- Verify caching mechanisms work correctly\n- Test context windowing and metadata handling\n- Validate context persistence across server restarts\n\n### Direct Function Import Testing\n- Verify Task Master functions are imported correctly\n- Test performance improvements compared to CLI execution\n- Validate error handling with direct imports\n\n### Tool Registration Testing\n- Verify tools are registered with proper descriptions and parameters\n- Test decorator-based registration patterns\n- Validate tool execution with different input types\n\n### Error Handling Testing\n- Test all error paths with appropriate MCPError types\n- Verify error propagation to clients\n- Test recovery from various error conditions\n\n### Performance Testing\n- Benchmark response times with and without caching\n- Test memory usage under load\n- Verify concurrent request handling\n\n## Test Quality Guidelines\n\n- Follow TDD approach when possible\n- Maintain test independence and isolation\n- Use descriptive test names explaining expected behavior\n- Aim for 80%+ code coverage, with critical paths at 100%\n- Follow the mock-first-then-import pattern for all Jest mocks\n- Avoid testing implementation details that might change\n- Ensure tests don't depend on execution order\n\n## Specific Test Cases\n\n1. 
**Server Initialization**\n - Test server creation with various configuration options\n - Verify proper tool and resource registration\n - Test server startup and shutdown procedures\n\n2. **Context Operations**\n - Test context creation, retrieval, update, and deletion\n - Verify context windowing and truncation\n - Test context metadata and tagging\n\n3. **Tool Execution**\n - Test each tool with various input parameters\n - Verify proper error handling for invalid inputs\n - Test tool execution performance\n\n4. **MCP.json Integration**\n - Test creation and updating of .cursor/mcp.json\n - Verify proper server registration in mcp.json\n - Test handling of existing mcp.json files\n\n5. **Transport Handling**\n - Test stdio communication\n - Verify proper message formatting\n - Test error handling in transport layer\n\nAll tests will be automated and integrated into the CI/CD pipeline to ensure consistent quality.",
"subtasks": [
{
"id": 1,
@@ -1379,29 +1380,246 @@
"parentTaskId": 23
},
{
"id": 4,
"title": "Implement Authentication and Authorization System",
"description": "Create a secure authentication and authorization mechanism for MCP clients to ensure only authorized applications can access the MCP server functionality.",
"dependencies": [
1,
3
],
"details": "Implementation steps:\n1. Design authentication scheme (API keys, OAuth, JWT, etc.)\n2. Implement authentication middleware for all MCP endpoints\n3. Create an API key management system for client applications\n4. Develop role-based access control for different operations\n5. Implement rate limiting to prevent abuse\n6. Add secure token validation and handling\n7. Create endpoints for managing client credentials\n8. Implement audit logging for authentication events\n\nTesting approach:\n- Security testing for authentication mechanisms\n- Test access control with various permission levels\n- Verify rate limiting functionality\n- Test token validation with valid and invalid tokens\n- Simulate unauthorized access attempts\n- Verify audit logs contain appropriate information",
"status": "pending",
"parentTaskId": 23
},
{
"id": 5,
"title": "Optimize Performance and Finalize Documentation",
"description": "Optimize the MCP server implementation for performance, especially for context retrieval operations, and create comprehensive documentation for users.",
"id": 6,
"title": "Refactor MCP Server to Leverage ModelContextProtocol SDK",
"description": "Integrate the ModelContextProtocol SDK directly into the MCP server implementation to streamline tool registration and resource handling.",
"dependencies": [
1,
2,
3,
4
3
],
"details": "Implementation steps:\n1. Profile the MCP server to identify performance bottlenecks\n2. Implement caching mechanisms for frequently accessed contexts\n3. Optimize context serialization and deserialization\n4. Add connection pooling for database operations (if applicable)\n5. Implement request batching for bulk operations\n6. Create comprehensive API documentation with examples\n7. Add setup and configuration guides to the Task Master documentation\n8. Create example client implementations\n9. Add monitoring endpoints for server health and metrics\n10. Implement graceful degradation under high load\n\nTesting approach:\n- Load testing with simulated concurrent clients\n- Measure response times for various operations\n- Test with large context sizes to verify performance\n- Verify documentation accuracy with sample requests\n- Test monitoring endpoints\n- Perform stress testing to identify failure points",
"details": "Implementation steps:\n1. Replace manual tool registration with ModelContextProtocol SDK methods.\n2. Use SDK utilities to simplify resource and template management.\n3. Ensure compatibility with FastMCP's transport mechanisms.\n4. Update server initialization to include SDK-based configurations.\n\nTesting approach:\n- Verify SDK integration with all MCP endpoints.\n- Test resource and template registration using SDK methods.\n- Validate compatibility with existing MCP clients.\n- Benchmark performance improvements from SDK integration.",
"status": "deferred",
"parentTaskId": 23
},
{
"id": 8,
"title": "Implement Direct Function Imports and Replace CLI-based Execution",
"description": "Refactor the MCP server implementation to use direct Task Master function imports instead of the current CLI-based execution using child_process.spawnSync. This will improve performance, reliability, and enable better error handling.",
"dependencies": [
"23.13"
],
"details": "\n\n<info added on 2025-03-30T00:14:10.040Z>\n```\n# Refactoring Strategy for Direct Function Imports\n\n## Core Approach\n1. Create a clear separation between data retrieval/processing and presentation logic\n2. Modify function signatures to accept `outputFormat` parameter ('cli'|'json', default: 'cli')\n3. Implement early returns for JSON format to bypass CLI-specific code\n\n## Implementation Details for `listTasks`\n```javascript\nfunction listTasks(tasksPath, statusFilter, withSubtasks = false, outputFormat = 'cli') {\n try {\n // Existing data retrieval logic\n const filteredTasks = /* ... */;\n \n // Early return for JSON format\n if (outputFormat === 'json') return filteredTasks;\n \n // Existing CLI output logic\n } catch (error) {\n if (outputFormat === 'json') {\n throw {\n code: 'TASK_LIST_ERROR',\n message: error.message,\n details: error.stack\n };\n } else {\n console.error(error);\n process.exit(1);\n }\n }\n}\n```\n\n## Testing Strategy\n- Create integration tests in `tests/integration/mcp-server/`\n- Use FastMCP InMemoryTransport for direct client-server testing\n- Test both JSON and CLI output formats\n- Verify structure consistency with schema validation\n\n## Additional Considerations\n- Update JSDoc comments to document new parameters and return types\n- Ensure backward compatibility with default CLI behavior\n- Add JSON schema validation for consistent output structure\n- Apply similar pattern to other core functions (expandTask, updateTaskById, etc.)\n\n## Error Handling Improvements\n- Standardize error format for JSON returns:\n```javascript\n{\n code: 'ERROR_CODE',\n message: 'Human-readable message',\n details: {}, // Additional context when available\n stack: process.env.NODE_ENV === 'development' ? error.stack : undefined\n}\n```\n- Enrich JSON errors with error codes and debug info\n- Ensure validation failures return proper objects in JSON mode\n```\n</info added on 2025-03-30T00:14:10.040Z>",
"status": "done",
"parentTaskId": 23
},
{
"id": 9,
"title": "Implement Context Management and Caching Mechanisms",
"description": "Enhance the MCP server with proper context management and caching to improve performance and user experience, especially for frequently accessed data and contexts.",
"dependencies": [
1
],
"details": "1. Implement a context manager class that leverages FastMCP's Context object\n2. Add caching for frequently accessed task data with configurable TTL settings\n3. Implement context tagging for better organization of context data\n4. Add methods to efficiently handle large context windows\n5. Create helper functions for storing and retrieving context data\n6. Implement cache invalidation strategies for task updates\n7. Add cache statistics for monitoring performance\n8. Create unit tests for context management and caching functionality",
"status": "done",
"parentTaskId": 23
},
{
"id": 10,
"title": "Enhance Tool Registration and Resource Management",
"description": "Refactor tool registration to follow FastMCP best practices, using decorators and improving the overall structure. Implement proper resource management for task templates and other shared resources.",
"dependencies": [
1,
"23.8"
],
"details": "1. Update registerTaskMasterTools function to use FastMCP's decorator pattern\n2. Implement @mcp.tool() decorators for all existing tools\n3. Add proper type annotations and documentation for all tools\n4. Create resource handlers for task templates using @mcp.resource()\n5. Implement resource templates for common task patterns\n6. Update the server initialization to properly register all tools and resources\n7. Add validation for tool inputs using FastMCP's built-in validation\n8. Create comprehensive tests for tool registration and resource access",
"status": "deferred",
"parentTaskId": 23
},
{
"id": 11,
"title": "Implement Comprehensive Error Handling",
"description": "Implement robust error handling using FastMCP's MCPError, including custom error types for different categories and standardized error responses.",
"details": "1. Create custom error types extending MCPError for different categories (validation, auth, etc.)\\n2. Implement standardized error responses following MCP protocol\\n3. Add error handling middleware for all MCP endpoints\\n4. Ensure proper error propagation from tools to client\\n5. Add debug mode with detailed error information\\n6. Document error types and handling patterns",
"status": "deferred",
"dependencies": [
"23.1",
"23.3"
],
"parentTaskId": 23
},
{
"id": 12,
"title": "Implement Structured Logging System",
"description": "Implement a comprehensive logging system for the MCP server with different log levels, structured logging format, and request/response tracking.",
"details": "1. Design structured log format for consistent parsing\\n2. Implement different log levels (debug, info, warn, error)\\n3. Add request/response logging middleware\\n4. Implement correlation IDs for request tracking\\n5. Add performance metrics logging\\n6. Configure log output destinations (console, file)\\n7. Document logging patterns and usage",
"status": "deferred",
"dependencies": [
"23.1",
"23.3"
],
"parentTaskId": 23
},
{
"id": 13,
"title": "Create Testing Framework and Test Suite",
"description": "Implement a comprehensive testing framework for the MCP server, including unit tests, integration tests, and end-to-end tests.",
"details": "1. Set up Jest testing framework with proper configuration\\n2. Create MCPTestClient for testing FastMCP server interaction\\n3. Implement unit tests for individual tool functions\\n4. Create integration tests for end-to-end request/response cycles\\n5. Set up test fixtures and mock data\\n6. Implement test coverage reporting\\n7. Document testing guidelines and examples",
"status": "deferred",
"dependencies": [
"23.1",
"23.3"
],
"parentTaskId": 23
},
{
"id": 14,
"title": "Add MCP.json to the Init Workflow",
"description": "Implement functionality to create or update .cursor/mcp.json during project initialization, handling cases where: 1) If there's no mcp.json, create it with the appropriate configuration; 2) If there is an mcp.json, intelligently append to it without syntax errors like trailing commas",
"details": "1. Create functionality to detect if .cursor/mcp.json exists in the project\\n2. Implement logic to create a new mcp.json file with proper structure if it doesn't exist\\n3. Add functionality to read and parse existing mcp.json if it exists\\n4. Create method to add a new taskmaster-ai server entry to the mcpServers object\\n5. Implement intelligent JSON merging that avoids trailing commas and syntax errors\\n6. Ensure proper formatting and indentation in the generated/updated JSON\\n7. Add validation to verify the updated configuration is valid JSON\\n8. Include this functionality in the init workflow\\n9. Add error handling for file system operations and JSON parsing\\n10. Document the mcp.json structure and integration process",
"status": "done",
"dependencies": [
"23.1",
"23.3"
],
"parentTaskId": 23
},
{
"id": 15,
"title": "Implement SSE Support for Real-time Updates",
"description": "Add Server-Sent Events (SSE) capabilities to the MCP server to enable real-time updates and streaming of task execution progress, logs, and status changes to clients",
"details": "1. Research and implement SSE protocol for the MCP server\\n2. Create dedicated SSE endpoints for event streaming\\n3. Implement event emitter pattern for internal event management\\n4. Add support for different event types (task status, logs, errors)\\n5. Implement client connection management with proper keep-alive handling\\n6. Add filtering capabilities to allow subscribing to specific event types\\n7. Create in-memory event buffer for clients reconnecting\\n8. Document SSE endpoint usage and client implementation examples\\n9. Add robust error handling for dropped connections\\n10. Implement rate limiting and backpressure mechanisms\\n11. Add authentication for SSE connections",
"status": "deferred",
"dependencies": [
"23.1",
"23.3",
"23.11"
],
"parentTaskId": 23
},
{
"id": 16,
"title": "Implement parse-prd MCP command",
"description": "Create direct function wrapper and MCP tool for parsing PRD documents to generate tasks.",
"details": "Following MCP implementation standards:\\n\\n1. Create parsePRDDirect function in task-master-core.js:\\n - Import parsePRD from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: input file, output path, numTasks\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create parse-prd.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import parsePRDDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerParsePRDTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for parsePRDDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 17,
"title": "Implement update MCP command",
"description": "Create direct function wrapper and MCP tool for updating multiple tasks based on prompt.",
"details": "Following MCP implementation standards:\\n\\n1. Create updateTasksDirect function in task-master-core.js:\\n - Import updateTasks from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: fromId, prompt, useResearch\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create update.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import updateTasksDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerUpdateTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for updateTasksDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 18,
"title": "Implement update-task MCP command",
"description": "Create direct function wrapper and MCP tool for updating a single task by ID with new information.",
"details": "Following MCP implementation standards:\\n\\n1. Create updateTaskByIdDirect function in task-master-core.js:\\n - Import updateTaskById from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: taskId, prompt, useResearch\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create update-task.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import updateTaskByIdDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerUpdateTaskTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for updateTaskByIdDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 19,
"title": "Implement update-subtask MCP command",
"description": "Create direct function wrapper and MCP tool for appending information to a specific subtask.",
"details": "Following MCP implementation standards:\\n\\n1. Create updateSubtaskByIdDirect function in task-master-core.js:\\n - Import updateSubtaskById from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: subtaskId, prompt, useResearch\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create update-subtask.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import updateSubtaskByIdDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerUpdateSubtaskTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for updateSubtaskByIdDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 20,
"title": "Implement generate MCP command",
"description": "Create direct function wrapper and MCP tool for generating task files from tasks.json.",
"details": "Following MCP implementation standards:\\n\\n1. Create generateTaskFilesDirect function in task-master-core.js:\\n - Import generateTaskFiles from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: tasksPath, outputDir\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create generate.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import generateTaskFilesDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerGenerateTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for generateTaskFilesDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 21,
"title": "Implement set-status MCP command",
"description": "Create direct function wrapper and MCP tool for setting task status.",
"details": "Following MCP implementation standards:\\n\\n1. Create setTaskStatusDirect function in task-master-core.js:\\n - Import setTaskStatus from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: taskId, status\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create set-status.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import setTaskStatusDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerSetStatusTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for setTaskStatusDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 22,
"title": "Implement show-task MCP command",
"description": "Create direct function wrapper and MCP tool for showing task details.",
"details": "Following MCP implementation standards:\\n\\n1. Create showTaskDirect function in task-master-core.js:\\n - Import showTask from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: taskId\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create show-task.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import showTaskDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerShowTaskTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for showTaskDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 23,
"title": "Implement next-task MCP command",
"description": "Create direct function wrapper and MCP tool for finding the next task to work on.",
"details": "Following MCP implementation standards:\\n\\n1. Create nextTaskDirect function in task-master-core.js:\\n - Import nextTask from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments (no specific args needed except projectRoot/file)\\n - Handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create next-task.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import nextTaskDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerNextTaskTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for nextTaskDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 24,
"title": "Implement expand-task MCP command",
"description": "Create direct function wrapper and MCP tool for expanding a task into subtasks.",
"details": "Following MCP implementation standards:\\n\\n1. Create expandTaskDirect function in task-master-core.js:\\n - Import expandTask from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: taskId, prompt, num, force, research\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create expand-task.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import expandTaskDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerExpandTaskTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for expandTaskDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 25,
"title": "Implement add-task MCP command",
"description": "Create direct function wrapper and MCP tool for adding new tasks.",
"details": "Following MCP implementation standards:\\n\\n1. Create addTaskDirect function in task-master-core.js:\\n - Import addTask from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: prompt, priority, dependencies\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create add-task.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import addTaskDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerAddTaskTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for addTaskDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 26,
"title": "Implement add-subtask MCP command",
"description": "Create direct function wrapper and MCP tool for adding subtasks to existing tasks.",
"details": "Following MCP implementation standards:\\n\\n1. Create addSubtaskDirect function in task-master-core.js:\\n - Import addSubtask from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: parentTaskId, title, description, details\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create add-subtask.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import addSubtaskDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerAddSubtaskTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for addSubtaskDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 27,
"title": "Implement remove-subtask MCP command",
"description": "Create direct function wrapper and MCP tool for removing subtasks from tasks.",
"details": "Following MCP implementation standards:\\n\\n1. Create removeSubtaskDirect function in task-master-core.js:\\n - Import removeSubtask from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: parentTaskId, subtaskId\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create remove-subtask.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import removeSubtaskDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerRemoveSubtaskTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for removeSubtaskDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 28,
"title": "Implement analyze MCP command",
"description": "Create direct function wrapper and MCP tool for analyzing task complexity.",
"details": "Following MCP implementation standards:\\n\\n1. Create analyzeTaskComplexityDirect function in task-master-core.js:\\n - Import analyzeTaskComplexity from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: taskId\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create analyze.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import analyzeTaskComplexityDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerAnalyzeTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for analyzeTaskComplexityDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 29,
"title": "Implement clear-subtasks MCP command",
"description": "Create direct function wrapper and MCP tool for clearing subtasks from a parent task.",
"details": "Following MCP implementation standards:\\n\\n1. Create clearSubtasksDirect function in task-master-core.js:\\n - Import clearSubtasks from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: taskId\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create clear-subtasks.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import clearSubtasksDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerClearSubtasksTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for clearSubtasksDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
},
{
"id": 30,
"title": "Implement expand-all MCP command",
"description": "Create direct function wrapper and MCP tool for expanding all tasks into subtasks.",
"details": "Following MCP implementation standards:\\n\\n1. Create expandAllTasksDirect function in task-master-core.js:\\n - Import expandAllTasks from task-manager.js\\n - Handle file paths using findTasksJsonPath utility\\n - Process arguments: prompt, num, force, research\\n - Validate inputs and handle errors with try/catch\\n - Return standardized { success, data/error } object\\n - Add to directFunctions map\\n\\n2. Create expand-all.js MCP tool in mcp-server/src/tools/:\\n - Import z from zod for parameter schema\\n - Import executeMCPToolAction from ./utils.js\\n - Import expandAllTasksDirect from task-master-core.js\\n - Define parameters matching CLI options using zod schema\\n - Implement registerExpandAllTool(server) with server.addTool\\n - Use executeMCPToolAction in execute method\\n\\n3. Register in tools/index.js\\n\\n4. Add to .cursor/mcp.json with appropriate schema\\n\\n5. Write tests following testing guidelines:\\n - Unit test for expandAllTasksDirect\\n - Integration test for MCP tool",
"status": "pending",
"dependencies": [],
"parentTaskId": 23
}
]
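
The fifteen `*Direct` wrapper subtasks above all describe the same two-file pattern. The sketch below illustrates it for next-task; it is an illustration only — the module paths and the exact signatures of `findTasksJsonPath` and `executeMCPToolAction` are assumptions, not the project's confirmed API:

```javascript
// Sketch of the direct-function + MCP tool pattern (paths and signatures assumed).

// --- scripts/modules/task-master-core.js ---
import { nextTask } from './task-manager.js';
import { findTasksJsonPath } from './utils.js';

export async function nextTaskDirect(args, log) {
  try {
    const tasksPath = findTasksJsonPath(args); // resolve tasks.json from projectRoot/file args
    const task = nextTask(tasksPath);          // call core logic directly, no child process
    return { success: true, data: task };      // standardized result shape
  } catch (error) {
    return {
      success: false,
      error: { code: 'NEXT_TASK_ERROR', message: error.message },
    };
  }
}

// --- mcp-server/src/tools/next-task.js ---
import { z } from 'zod';
import { executeMCPToolAction } from './utils.js';
import { nextTaskDirect } from '../../../scripts/modules/task-master-core.js';

export function registerNextTaskTool(server) {
  server.addTool({
    name: 'next-task',
    description: 'Find the next task to work on based on dependencies and status',
    parameters: z.object({
      projectRoot: z.string().optional().describe('Project root directory'),
      file: z.string().optional().describe('Path to tasks.json'),
    }),
    // FastMCP passes the tool arguments plus a context object containing a logger
    execute: async (args, { log }) => executeMCPToolAction(nextTaskDirect, args, log),
  });
}
```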
@@ -1714,13 +1932,120 @@
},
{
"id": 32,
"title": "Implement 'learn' Command for Automatic Cursor Rule Generation",
"description": "Create a new 'learn' command that analyzes code changes and chat history to automatically generate or update Cursor rules in the .cursor/rules directory based on successful implementation patterns.",
"title": "Implement \"learn\" Command for Automatic Cursor Rule Generation",
"description": "Create a new \"learn\" command that analyzes Cursor's chat history and code changes to automatically generate or update rule files in the .cursor/rules directory, following the cursor_rules.mdc template format. This command will help Cursor autonomously improve its ability to follow development standards by learning from successful implementations.",
"status": "pending",
"dependencies": [],
"priority": "high",
"details": "Implement a new command in the task-master CLI that enables Cursor to learn from successful coding patterns:\n\n1. Create a new module `commands/learn.js` that implements the command logic\n2. Update `index.js` to register the new command\n3. The command should:\n - Accept an optional parameter for specifying which patterns to focus on\n - Use git diff to extract code changes since the last commit\n - Access the Cursor chat history if possible (investigate API or file storage location)\n - Call Claude via ai-services.js with the following context:\n * Code diffs\n * Chat history excerpts showing challenges and solutions\n * Existing rules from .cursor/rules if present\n - Parse Claude's response to extract rule definitions\n - Create or update .mdc files in the .cursor/rules directory\n - Provide a summary of what was learned and which rules were updated\n\n4. Create helper functions to:\n - Extract relevant patterns from diffs\n - Format the prompt for Claude to focus on identifying reusable patterns\n - Parse Claude's response into valid rule definitions\n - Handle rule conflicts or duplications\n\n5. Ensure the command handles errors gracefully, especially if chat history is inaccessible\n6. Add appropriate logging to show the learning process\n7. Document the command in the README.md file",
"testStrategy": "1. Unit tests:\n - Create tests for each helper function in isolation\n - Mock git diff responses and chat history data\n - Verify rule extraction logic works with different input patterns\n - Test error handling for various failure scenarios\n\n2. Integration tests:\n - Test the command in a repository with actual code changes\n - Verify it correctly generates .mdc files in the .cursor/rules directory\n - Check that generated rules follow the correct format\n - Verify the command correctly updates existing rules without losing custom modifications\n\n3. Manual testing scenarios:\n - Run the command after implementing a feature with specific patterns\n - Verify the generated rules capture the intended patterns\n - Test the command with and without existing rules\n - Verify the command works when chat history is available and when it isn't\n - Test with large diffs to ensure performance remains acceptable\n\n4. Validation:\n - After generating rules, use them in Cursor to verify they correctly guide future implementations\n - Have multiple team members test the command to ensure consistent results"
"details": "Implement a new command in the task-master CLI that enables Cursor to learn from successful coding patterns and chat interactions:\n\nKey Components:\n1. Cursor Data Analysis\n - Access and parse Cursor's chat history from ~/Library/Application Support/Cursor/User/History\n - Extract relevant patterns, corrections, and successful implementations\n - Track file changes and their associated chat context\n\n2. Rule Management\n - Use cursor_rules.mdc as the template for all rule file formatting\n - Manage rule files in .cursor/rules directory\n - Support both creation and updates of rule files\n - Categorize rules based on context (testing, components, API, etc.)\n\n3. AI Integration\n - Utilize ai-services.js to interact with Claude\n - Provide comprehensive context including:\n * Relevant chat history showing the evolution of solutions\n * Code changes and their outcomes\n * Existing rules and template structure\n - Generate or update rules while maintaining template consistency\n\n4. Implementation Requirements:\n - Automatic triggering after task completion (configurable)\n - Manual triggering via CLI command\n - Proper error handling for missing or corrupt files\n - Validation against cursor_rules.mdc template\n - Performance optimization for large histories\n - Clear logging and progress indication\n\n5. Key Files:\n - commands/learn.js: Main command implementation\n - rules/cursor-rules-manager.js: Rule file management\n - utils/chat-history-analyzer.js: Cursor chat analysis\n - index.js: Command registration\n\n6. Security Considerations:\n - Safe file system operations\n - Proper error handling for inaccessible files\n - Validation of generated rules\n - Backup of existing rules before updates",
"testStrategy": "1. Unit Tests:\n - Test each component in isolation:\n * Chat history extraction and analysis\n * Rule file management and validation\n * Pattern detection and categorization\n * Template validation logic\n - Mock file system operations and AI responses\n - Test error handling and edge cases\n\n2. Integration Tests:\n - End-to-end command execution\n - File system interactions\n - AI service integration\n - Rule generation and updates\n - Template compliance validation\n\n3. Manual Testing:\n - Test after completing actual development tasks\n - Verify rule quality and usefulness\n - Check template compliance\n - Validate performance with large histories\n - Test automatic and manual triggering\n\n4. Validation Criteria:\n - Generated rules follow cursor_rules.mdc format\n - Rules capture meaningful patterns\n - Performance remains acceptable\n - Error handling works as expected\n - Generated rules improve Cursor's effectiveness",
"subtasks": [
{
"id": 1,
"title": "Create Initial File Structure",
"description": "Set up the basic file structure for the learn command implementation",
"details": "Create the following files with basic exports:\n- commands/learn.js\n- rules/cursor-rules-manager.js\n- utils/chat-history-analyzer.js\n- utils/cursor-path-helper.js",
"status": "pending"
},
{
"id": 2,
"title": "Implement Cursor Path Helper",
"description": "Create utility functions to handle Cursor's application data paths",
"details": "In utils/cursor-path-helper.js implement:\n- getCursorAppDir(): Returns ~/Library/Application Support/Cursor\n- getCursorHistoryDir(): Returns User/History path\n- getCursorLogsDir(): Returns logs directory path\n- validatePaths(): Ensures required directories exist",
"status": "pending"
},
{
"id": 3,
"title": "Create Chat History Analyzer Base",
"description": "Create the base structure for analyzing Cursor's chat history",
"details": "In utils/chat-history-analyzer.js create:\n- ChatHistoryAnalyzer class\n- readHistoryDir(): Lists all history directories\n- readEntriesJson(): Parses entries.json files\n- parseHistoryEntry(): Extracts relevant data from .js files",
"status": "pending"
},
{
"id": 4,
"title": "Implement Chat History Extraction",
"description": "Add core functionality to extract relevant chat history",
"details": "In ChatHistoryAnalyzer add:\n- extractChatHistory(startTime): Gets history since task start\n- parseFileChanges(): Extracts code changes\n- parseAIInteractions(): Extracts AI responses\n- filterRelevantHistory(): Removes irrelevant entries",
"status": "pending"
},
{
"id": 5,
"title": "Create CursorRulesManager Base",
"description": "Set up the base structure for managing Cursor rules",
"details": "In rules/cursor-rules-manager.js create:\n- CursorRulesManager class\n- readTemplate(): Reads cursor_rules.mdc\n- listRuleFiles(): Lists all .mdc files\n- readRuleFile(): Reads specific rule file",
"status": "pending"
},
{
"id": 6,
"title": "Implement Template Validation",
"description": "Add validation logic for rule files against cursor_rules.mdc",
"details": "In CursorRulesManager add:\n- validateRuleFormat(): Checks against template\n- parseTemplateStructure(): Extracts template sections\n- validateAgainstTemplate(): Validates content structure\n- getRequiredSections(): Lists mandatory sections",
"status": "pending"
},
{
"id": 7,
"title": "Add Rule Categorization Logic",
"description": "Implement logic to categorize changes into rule files",
"details": "In CursorRulesManager add:\n- categorizeChanges(): Maps changes to rule files\n- detectRuleCategories(): Identifies relevant categories\n- getRuleFileForPattern(): Maps patterns to files\n- createNewRuleFile(): Initializes new rule files",
"status": "pending"
},
{
"id": 8,
"title": "Implement Pattern Analysis",
"description": "Create functions to analyze implementation patterns",
"details": "In ChatHistoryAnalyzer add:\n- extractPatterns(): Finds success patterns\n- extractCorrections(): Finds error corrections\n- findSuccessfulPaths(): Tracks successful implementations\n- analyzeDecisions(): Extracts key decisions",
"status": "pending"
},
{
"id": 9,
"title": "Create AI Prompt Builder",
"description": "Implement prompt construction for Claude",
"details": "In learn.js create:\n- buildRuleUpdatePrompt(): Builds Claude prompt\n- formatHistoryContext(): Formats chat history\n- formatRuleContext(): Formats current rules\n- buildInstructions(): Creates specific instructions",
"status": "pending"
},
|
||||
{
|
||||
"id": 10,
|
||||
"title": "Implement Learn Command Core",
|
||||
"description": "Create the main learn command implementation",
|
||||
"details": "In commands/learn.js implement:\n- learnCommand(): Main command function\n- processRuleUpdates(): Handles rule updates\n- generateSummary(): Creates learning summary\n- handleErrors(): Manages error cases",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 11,
|
||||
"title": "Add Auto-trigger Support",
|
||||
"description": "Implement automatic learning after task completion",
|
||||
"details": "Update task-manager.js:\n- Add autoLearnConfig handling\n- Modify completeTask() to trigger learning\n- Add learning status tracking\n- Implement learning queue",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 12,
|
||||
"title": "Implement CLI Integration",
|
||||
"description": "Add the learn command to the CLI",
|
||||
"details": "Update index.js to:\n- Register learn command\n- Add command options\n- Handle manual triggers\n- Process command flags",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 13,
|
||||
"title": "Add Progress Logging",
|
||||
"description": "Implement detailed progress logging",
|
||||
"details": "Create utils/learn-logger.js with:\n- logLearningProgress(): Tracks overall progress\n- logRuleUpdates(): Tracks rule changes\n- logErrors(): Handles error logging\n- createSummary(): Generates final report",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 14,
|
||||
"title": "Implement Error Recovery",
|
||||
"description": "Add robust error handling throughout the system",
|
||||
"details": "Create utils/error-handler.js with:\n- handleFileErrors(): Manages file system errors\n- handleParsingErrors(): Manages parsing failures\n- handleAIErrors(): Manages Claude API errors\n- implementRecoveryStrategies(): Adds recovery logic",
|
||||
"status": "pending"
|
||||
},
|
||||
{
|
||||
"id": 15,
|
||||
"title": "Add Performance Optimization",
|
||||
"description": "Optimize performance for large histories",
|
||||
"details": "Add to utils/performance-optimizer.js:\n- implementCaching(): Adds result caching\n- optimizeFileReading(): Improves file reading\n- addProgressiveLoading(): Implements lazy loading\n- addMemoryManagement(): Manages memory usage",
|
||||
"status": "pending"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 33,
|
||||
@@ -1731,6 +2056,246 @@
|
||||
"priority": "medium",
|
||||
"details": "This task involves creating a mechanism to generate a Windsurf-specific rules document by combining three existing MDC (Markdown Content) files that are currently used for Cursor Rules. The implementation should:\n\n1. Identify and locate the three primary .mdc files used for Cursor Rules\n2. Extract content from these files and merge them into a single document\n3. Refactor the content to make it Windsurf-specific, replacing Cursor-specific terminology and adapting guidelines as needed\n4. Create a function that generates a .windsurfrules document from this content\n5. Integrate this function into the initialization pipeline\n6. Implement logic to check if a .windsurfrules document already exists:\n - If it exists, append the new content to it\n - If it doesn't exist, create a new document\n7. Ensure proper error handling for file operations\n8. Add appropriate logging to track the generation and modification of the .windsurfrules document\n\nThe implementation should be modular and maintainable, with clear separation of concerns between content extraction, refactoring, and file operations.",
|
||||
"testStrategy": "Testing should verify both the content generation and the integration with the initialization pipeline:\n\n1. Unit Tests:\n - Test the content extraction function with mock .mdc files\n - Test the content refactoring function to ensure Cursor-specific terms are properly replaced\n - Test the file operation functions with mock filesystem\n\n2. Integration Tests:\n - Test the creation of a new .windsurfrules document when none exists\n - Test appending to an existing .windsurfrules document\n - Test the complete initialization pipeline with the new functionality\n\n3. Manual Verification:\n - Inspect the generated .windsurfrules document to ensure content is properly combined and refactored\n - Verify that Cursor-specific terminology has been replaced with Windsurf-specific terminology\n - Run the initialization process multiple times to verify idempotence (content isn't duplicated on multiple runs)\n\n4. Edge Cases:\n - Test with missing or corrupted .mdc files\n - Test with an existing but empty .windsurfrules document\n - Test with an existing .windsurfrules document that already contains some of the content"
|
||||
},
|
||||
{
|
||||
"id": 34,
|
||||
"title": "Implement updateTask Command for Single Task Updates",
|
||||
"description": "Create a new command that allows updating a specific task by ID using AI-driven refinement while preserving completed subtasks and supporting all existing update command options.",
|
||||
"status": "done",
|
||||
"dependencies": [],
|
||||
"priority": "high",
|
||||
"details": "Implement a new command called 'updateTask' that focuses on updating a single task rather than all tasks from an ID onwards. The implementation should:\n\n1. Accept a single task ID as a required parameter\n2. Use the same AI-driven approach as the existing update command to refine the task\n3. Preserve the completion status of any subtasks that were previously marked as complete\n4. Support all options from the existing update command including:\n - The research flag for Perplexity integration\n - Any formatting or refinement options\n - Task context options\n5. Update the CLI help documentation to include this new command\n6. Ensure the command follows the same pattern as other commands in the codebase\n7. Add appropriate error handling for cases where the specified task ID doesn't exist\n8. Implement the ability to update task title, description, and details separately if needed\n9. Ensure the command returns appropriate success/failure messages\n10. Optimize the implementation to only process the single task rather than scanning through all tasks\n\nThe command should reuse existing AI prompt templates where possible but modify them to focus on refining a single task rather than multiple tasks.",
|
||||
"testStrategy": "Testing should verify the following aspects:\n\n1. **Basic Functionality Test**: Verify that the command successfully updates a single task when given a valid task ID\n2. **Preservation Test**: Create a task with completed subtasks, update it, and verify the completion status remains intact\n3. **Research Flag Test**: Test the command with the research flag and verify it correctly integrates with Perplexity\n4. **Error Handling Tests**:\n - Test with non-existent task ID and verify appropriate error message\n - Test with invalid parameters and verify helpful error messages\n5. **Integration Test**: Run a complete workflow that creates a task, updates it with updateTask, and then verifies the changes are persisted\n6. **Comparison Test**: Compare the results of updating a single task with updateTask versus using the original update command on the same task to ensure consistent quality\n7. **Performance Test**: Measure execution time compared to the full update command to verify efficiency gains\n8. **CLI Help Test**: Verify the command appears correctly in help documentation with appropriate descriptions\n\nCreate unit tests for the core functionality and integration tests for the complete workflow. Document any edge cases discovered during testing.",
|
||||
"subtasks": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "Create updateTaskById function in task-manager.js",
|
||||
"description": "Implement a new function in task-manager.js that focuses on updating a single task by ID using AI-driven refinement while preserving completed subtasks.",
|
||||
"dependencies": [],
|
||||
"details": "Implementation steps:\n1. Create a new `updateTaskById` function in task-manager.js that accepts parameters: taskId, options object (containing research flag, formatting options, etc.)\n2. Implement logic to find a specific task by ID in the tasks array\n3. Add appropriate error handling for cases where the task ID doesn't exist (throw a custom error)\n4. Reuse existing AI prompt templates but modify them to focus on refining a single task\n5. Implement logic to preserve completion status of subtasks that were previously marked as complete\n6. Add support for updating task title, description, and details separately based on options\n7. Optimize the implementation to only process the single task rather than scanning through all tasks\n8. Return the updated task and appropriate success/failure messages\n\nTesting approach:\n- Unit test the function with various scenarios including:\n - Valid task ID with different update options\n - Non-existent task ID\n - Task with completed subtasks to verify preservation\n - Different combinations of update options",
|
||||
"status": "done",
|
||||
"parentTaskId": 34
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "Implement updateTask command in commands.js",
|
||||
"description": "Create a new command called 'updateTask' in commands.js that leverages the updateTaskById function to update a specific task by ID.",
|
||||
"dependencies": [
|
||||
1
|
||||
],
|
||||
"details": "Implementation steps:\n1. Create a new command object for 'updateTask' in commands.js following the Command pattern\n2. Define command parameters including a required taskId parameter\n3. Support all options from the existing update command:\n - Research flag for Perplexity integration\n - Formatting and refinement options\n - Task context options\n4. Implement the command handler function that calls the updateTaskById function from task-manager.js\n5. Add appropriate error handling to catch and display user-friendly error messages\n6. Ensure the command follows the same pattern as other commands in the codebase\n7. Implement proper validation of input parameters\n8. Format and return appropriate success/failure messages to the user\n\nTesting approach:\n- Unit test the command handler with various input combinations\n- Test error handling scenarios\n- Verify command options are correctly passed to the updateTaskById function",
|
||||
"status": "done",
|
||||
"parentTaskId": 34
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "Add comprehensive error handling and validation",
|
||||
"description": "Implement robust error handling and validation for the updateTask command to ensure proper user feedback and system stability.",
|
||||
"dependencies": [
|
||||
1,
|
||||
2
|
||||
],
|
||||
"details": "Implementation steps:\n1. Create custom error types for different failure scenarios (TaskNotFoundError, ValidationError, etc.)\n2. Implement input validation for the taskId parameter and all options\n3. Add proper error handling for AI service failures with appropriate fallback mechanisms\n4. Implement concurrency handling to prevent conflicts when multiple updates occur simultaneously\n5. Add comprehensive logging for debugging and auditing purposes\n6. Ensure all error messages are user-friendly and actionable\n7. Implement proper HTTP status codes for API responses if applicable\n8. Add validation to ensure the task exists before attempting updates\n\nTesting approach:\n- Test various error scenarios including invalid inputs, non-existent tasks, and API failures\n- Verify error messages are clear and helpful\n- Test concurrency scenarios with multiple simultaneous updates\n- Verify logging captures appropriate information for troubleshooting",
|
||||
"status": "done",
|
||||
"parentTaskId": 34
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"title": "Write comprehensive tests for updateTask command",
|
||||
"description": "Create a comprehensive test suite for the updateTask command to ensure it works correctly in all scenarios and maintains backward compatibility.",
|
||||
"dependencies": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
],
|
||||
"details": "Implementation steps:\n1. Create unit tests for the updateTaskById function in task-manager.js\n - Test finding and updating tasks with various IDs\n - Test preservation of completed subtasks\n - Test different update options combinations\n - Test error handling for non-existent tasks\n2. Create unit tests for the updateTask command in commands.js\n - Test command parameter parsing\n - Test option handling\n - Test error scenarios and messages\n3. Create integration tests that verify the end-to-end flow\n - Test the command with actual AI service integration\n - Test with mock AI responses for predictable testing\n4. Implement test fixtures and mocks for consistent testing\n5. Add performance tests to ensure the command is efficient\n6. Test edge cases such as empty tasks, tasks with many subtasks, etc.\n\nTesting approach:\n- Use Jest or similar testing framework\n- Implement mocks for external dependencies like AI services\n- Create test fixtures for consistent test data\n- Use snapshot testing for command output verification",
|
||||
"status": "done",
|
||||
"parentTaskId": 34
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"title": "Update CLI documentation and help text",
|
||||
"description": "Update the CLI help documentation to include the new updateTask command and ensure users understand its purpose and options.",
|
||||
"dependencies": [
|
||||
2
|
||||
],
|
||||
"details": "Implementation steps:\n1. Add comprehensive help text for the updateTask command including:\n - Command description\n - Required and optional parameters\n - Examples of usage\n - Description of all supported options\n2. Update the main CLI help documentation to include the new command\n3. Add the command to any relevant command groups or categories\n4. Create usage examples that demonstrate common scenarios\n5. Update README.md and other documentation files to include information about the new command\n6. Add inline code comments explaining the implementation details\n7. Update any API documentation if applicable\n8. Create or update user guides with the new functionality\n\nTesting approach:\n- Verify help text is displayed correctly when running `--help`\n- Review documentation for clarity and completeness\n- Have team members review the documentation for usability\n- Test examples to ensure they work as documented",
|
||||
"status": "done",
|
||||
"parentTaskId": 34
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 35,
|
||||
"title": "Integrate Grok3 API for Research Capabilities",
|
||||
"description": "Replace the current Perplexity API integration with Grok3 API for all research-related functionalities while maintaining existing feature parity.",
|
||||
"status": "pending",
|
||||
"dependencies": [],
|
||||
"priority": "medium",
|
||||
"details": "This task involves migrating from Perplexity to Grok3 API for research capabilities throughout the application. Implementation steps include:\n\n1. Create a new API client module for Grok3 in `src/api/grok3.ts` that handles authentication, request formatting, and response parsing\n2. Update the research service layer to use the new Grok3 client instead of Perplexity\n3. Modify the request payload structure to match Grok3's expected format (parameters like temperature, max_tokens, etc.)\n4. Update response handling to properly parse and extract Grok3's response format\n5. Implement proper error handling for Grok3-specific error codes and messages\n6. Update environment variables and configuration files to include Grok3 API keys and endpoints\n7. Ensure rate limiting and quota management are properly implemented according to Grok3's specifications\n8. Update any UI components that display research provider information to show Grok3 instead of Perplexity\n9. Maintain backward compatibility for any stored research results from Perplexity\n10. Document the new API integration in the developer documentation\n\nGrok3 API has different parameter requirements and response formats compared to Perplexity, so careful attention must be paid to these differences during implementation.",
|
||||
"testStrategy": "Testing should verify that the Grok3 API integration works correctly and maintains feature parity with the previous Perplexity implementation:\n\n1. Unit tests:\n - Test the Grok3 API client with mocked responses\n - Verify proper error handling for various error scenarios (rate limits, authentication failures, etc.)\n - Test the transformation of application requests to Grok3-compatible format\n\n2. Integration tests:\n - Perform actual API calls to Grok3 with test credentials\n - Verify that research results are correctly parsed and returned\n - Test with various types of research queries to ensure broad compatibility\n\n3. End-to-end tests:\n - Test the complete research flow from UI input to displayed results\n - Verify that all existing research features work with the new API\n\n4. Performance tests:\n - Compare response times between Perplexity and Grok3\n - Ensure the application handles any differences in response time appropriately\n\n5. Regression tests:\n - Verify that existing features dependent on research capabilities continue to work\n - Test that stored research results from Perplexity are still accessible and displayed correctly\n\nCreate a test environment with both APIs available to compare results and ensure quality before fully replacing Perplexity with Grok3."
|
||||
},
|
||||
{
|
||||
"id": 36,
|
||||
"title": "Add Ollama Support for AI Services as Claude Alternative",
|
||||
"description": "Implement Ollama integration as an alternative to Claude for all main AI services, allowing users to run local language models instead of relying on cloud-based Claude API.",
|
||||
"status": "pending",
|
||||
"dependencies": [],
|
||||
"priority": "medium",
|
||||
"details": "This task involves creating a comprehensive Ollama integration that can replace Claude across all main AI services in the application. Implementation should include:\n\n1. Create an OllamaService class that implements the same interface as the ClaudeService to ensure compatibility\n2. Add configuration options to specify Ollama endpoint URL (default: http://localhost:11434)\n3. Implement model selection functionality to allow users to choose which Ollama model to use (e.g., llama3, mistral, etc.)\n4. Handle prompt formatting specific to Ollama models, ensuring proper system/user message separation\n5. Implement proper error handling for cases where Ollama server is unavailable or returns errors\n6. Add fallback mechanism to Claude when Ollama fails or isn't configured\n7. Update the AI service factory to conditionally create either Claude or Ollama service based on configuration\n8. Ensure token counting and rate limiting are appropriately handled for Ollama models\n9. Add documentation for users explaining how to set up and use Ollama with the application\n10. Optimize prompt templates specifically for Ollama models if needed\n\nThe implementation should be toggled through a configuration option (useOllama: true/false) and should maintain all existing functionality currently provided by Claude.",
|
||||
"testStrategy": "Testing should verify that Ollama integration works correctly as a drop-in replacement for Claude:\n\n1. Unit tests:\n - Test OllamaService class methods in isolation with mocked responses\n - Verify proper error handling when Ollama server is unavailable\n - Test fallback mechanism to Claude when configured\n\n2. Integration tests:\n - Test with actual Ollama server running locally with at least two different models\n - Verify all AI service functions work correctly with Ollama\n - Compare outputs between Claude and Ollama for quality assessment\n\n3. Configuration tests:\n - Verify toggling between Claude and Ollama works as expected\n - Test with various model configurations\n\n4. Performance tests:\n - Measure and compare response times between Claude and Ollama\n - Test with different load scenarios\n\n5. Manual testing:\n - Verify all main AI features work correctly with Ollama\n - Test edge cases like very long inputs or specialized tasks\n\nCreate a test document comparing output quality between Claude and various Ollama models to help users understand the tradeoffs."
|
||||
},
|
||||
{
|
||||
"id": 37,
|
||||
"title": "Add Gemini Support for Main AI Services as Claude Alternative",
|
||||
"description": "Implement Google's Gemini API integration as an alternative to Claude for all main AI services, allowing users to switch between different LLM providers.",
|
||||
"status": "pending",
|
||||
"dependencies": [],
|
||||
"priority": "medium",
|
||||
"details": "This task involves integrating Google's Gemini API across all main AI services that currently use Claude:\n\n1. Create a new GeminiService class that implements the same interface as the existing ClaudeService\n2. Implement authentication and API key management for Gemini API\n3. Map our internal prompt formats to Gemini's expected input format\n4. Handle Gemini-specific parameters (temperature, top_p, etc.) and response parsing\n5. Update the AI service factory/provider to support selecting Gemini as an alternative\n6. Add configuration options in settings to allow users to select Gemini as their preferred provider\n7. Implement proper error handling for Gemini-specific API errors\n8. Ensure streaming responses are properly supported if Gemini offers this capability\n9. Update documentation to reflect the new Gemini option\n10. Consider implementing model selection if Gemini offers multiple models (e.g., Gemini Pro, Gemini Ultra)\n11. Ensure all existing AI capabilities (summarization, code generation, etc.) maintain feature parity when using Gemini\n\nThe implementation should follow the same pattern as the recent Ollama integration (Task #36) to maintain consistency in how alternative AI providers are supported.",
|
||||
"testStrategy": "Testing should verify Gemini integration works correctly across all AI services:\n\n1. Unit tests:\n - Test GeminiService class methods with mocked API responses\n - Verify proper error handling for common API errors\n - Test configuration and model selection functionality\n\n2. Integration tests:\n - Verify authentication and API connection with valid credentials\n - Test each AI service with Gemini to ensure proper functionality\n - Compare outputs between Claude and Gemini for the same inputs to verify quality\n\n3. End-to-end tests:\n - Test the complete user flow of switching to Gemini and using various AI features\n - Verify streaming responses work correctly if supported\n\n4. Performance tests:\n - Measure and compare response times between Claude and Gemini\n - Test with various input lengths to verify handling of context limits\n\n5. Manual testing:\n - Verify the quality of Gemini responses across different use cases\n - Test edge cases like very long inputs or specialized domain knowledge\n\nAll tests should pass with Gemini selected as the provider, and the user experience should be consistent regardless of which provider is selected."
|
||||
},
|
||||
{
|
||||
"id": 38,
|
||||
"title": "Implement Version Check System with Upgrade Notifications",
|
||||
"description": "Create a system that checks for newer package versions and displays upgrade notifications when users run any command, informing them to update to the latest version.",
|
||||
"status": "done",
|
||||
"dependencies": [],
|
||||
"priority": "high",
|
||||
"details": "Implement a version check mechanism that runs automatically with every command execution:\n\n1. Create a new module (e.g., `versionChecker.js`) that will:\n - Fetch the latest version from npm registry using the npm registry API (https://registry.npmjs.org/task-master-ai/latest)\n - Compare it with the current installed version (from package.json)\n - Store the last check timestamp to avoid excessive API calls (check once per day)\n - Cache the result to minimize network requests\n\n2. The notification should:\n - Use colored text (e.g., yellow background with black text) to be noticeable\n - Include the current version and latest version\n - Show the exact upgrade command: 'npm i task-master-ai@latest'\n - Be displayed at the beginning or end of command output, not interrupting the main content\n - Include a small separator line to distinguish it from command output\n\n3. Implementation considerations:\n - Handle network failures gracefully (don't block command execution if version check fails)\n - Add a configuration option to disable update checks if needed\n - Ensure the check is lightweight and doesn't significantly impact command performance\n - Consider using a package like 'semver' for proper version comparison\n - Implement a cooldown period (e.g., only check once per day) to avoid excessive API calls\n\n4. The version check should be integrated into the main command execution flow so it runs for all commands automatically.",
|
||||
"testStrategy": "1. Manual testing:\n - Install an older version of the package\n - Run various commands and verify the update notification appears\n - Update to the latest version and confirm the notification no longer appears\n - Test with network disconnected to ensure graceful handling of failures\n\n2. Unit tests:\n - Mock the npm registry response to test different scenarios:\n - When a newer version exists\n - When using the latest version\n - When the registry is unavailable\n - Test the version comparison logic with various version strings\n - Test the cooldown/caching mechanism works correctly\n\n3. Integration tests:\n - Create a test that runs a command and verifies the notification appears in the expected format\n - Test that the notification appears for all commands\n - Verify the notification doesn't interfere with normal command output\n\n4. Edge cases to test:\n - Pre-release versions (alpha/beta)\n - Very old versions\n - When package.json is missing or malformed\n - When npm registry returns unexpected data"
|
||||
},
|
||||
    {
      "id": 39,
      "title": "Update Project Licensing to Dual License Structure",
      "description": "Replace the current MIT license with a dual license structure that protects commercial rights for project owners while allowing non-commercial use under an open source license.",
      "status": "done",
      "dependencies": [],
      "priority": "high",
      "details": "This task requires implementing a comprehensive licensing update across the project:\n\n1. Remove all instances of the MIT license from the codebase, including any MIT license files, headers in source files, and references in documentation.\n\n2. Create a dual license structure with:\n - Business Source License (BSL) 1.1 or similar for commercial use, explicitly stating that commercial rights are exclusively reserved for Ralph & Eyal\n - Apache 2.0 for non-commercial use, allowing the community to use, modify, and distribute the code for non-commercial purposes\n\n3. Update the license field in package.json to reflect the dual license structure (e.g., \"BSL 1.1 / Apache 2.0\")\n\n4. Add a clear, concise explanation of the licensing terms in the README.md, including:\n - A summary of what users can and cannot do with the code\n - Who holds commercial rights\n - How to obtain commercial use permission if needed\n - Links to the full license texts\n\n5. Create a detailed LICENSE.md file that includes:\n - Full text of both licenses\n - Clear delineation between commercial and non-commercial use\n - Specific definitions of what constitutes commercial use\n - Any additional terms or clarifications specific to this project\n\n6. Create a CONTRIBUTING.md file that explicitly states:\n - Contributors must agree that their contributions will be subject to the project's dual licensing\n - Commercial rights for all contributions are assigned to Ralph & Eyal\n - Guidelines for acceptable contributions\n\n7. Ensure all source code files include appropriate license headers that reference the dual license structure.",
      "testStrategy": "To verify correct implementation, perform the following checks:\n\n1. File verification:\n - Confirm the MIT license file has been removed\n - Verify LICENSE.md exists and contains both BSL and Apache 2.0 license texts\n - Confirm README.md includes the license section with clear explanation\n - Verify CONTRIBUTING.md exists with proper contributor guidelines\n - Check package.json for updated license field\n\n2. Content verification:\n - Review LICENSE.md to ensure it properly describes the dual license structure with clear terms\n - Verify README.md license section is concise yet complete\n - Check that commercial rights are explicitly reserved for Ralph & Eyal in all relevant documents\n - Ensure CONTRIBUTING.md clearly explains the licensing implications for contributors\n\n3. Legal review:\n - Have a team member not involved in the implementation review all license documents\n - Verify that the chosen BSL terms properly protect commercial interests\n - Confirm the Apache 2.0 implementation is correct and compatible with the BSL portions\n\n4. Source code check:\n - Sample at least 10 source files to ensure they have updated license headers\n - Verify no MIT license references remain in any source files\n\n5. Documentation check:\n - Ensure any documentation that mentioned licensing has been updated to reflect the new structure",
      "subtasks": [
        {
          "id": 1,
          "title": "Remove MIT License and Create Dual License Files",
          "description": "Remove all MIT license references from the codebase and create the new license files for the dual license structure.",
          "dependencies": [],
          "details": "Implementation steps:\n1. Scan the entire codebase to identify all instances of MIT license references (license files, headers in source files, documentation mentions).\n2. Remove the MIT license file and all direct references to it.\n3. Create a LICENSE.md file containing:\n - Full text of Business Source License (BSL) 1.1 with explicit commercial rights reservation for Ralph & Eyal\n - Full text of Apache 2.0 license for non-commercial use\n - Clear definitions of what constitutes commercial vs. non-commercial use\n - Specific terms for obtaining commercial use permission\n4. Create a CONTRIBUTING.md file that explicitly states the contribution terms:\n - Contributors must agree to the dual licensing structure\n - Commercial rights for all contributions are assigned to Ralph & Eyal\n - Guidelines for acceptable contributions\n\nTesting approach:\n- Verify all MIT license references have been removed using a grep or similar search tool\n- Have legal review of the LICENSE.md and CONTRIBUTING.md files to ensure they properly protect commercial rights\n- Validate that the license files are properly formatted and readable",
          "status": "done",
          "parentTaskId": 39
        },
        {
          "id": 2,
          "title": "Update Source Code License Headers and Package Metadata",
          "description": "Add appropriate dual license headers to all source code files and update package metadata to reflect the new licensing structure.",
          "dependencies": [
            1
          ],
          "details": "Implementation steps:\n1. Create a template for the new license header that references the dual license structure (BSL 1.1 / Apache 2.0).\n2. Systematically update all source code files to include the new license header, replacing any existing MIT headers.\n3. Update the license field in package.json to \"BSL 1.1 / Apache 2.0\".\n4. Update any other metadata files (composer.json, setup.py, etc.) that contain license information.\n5. Verify that any build scripts or tools that reference licensing information are updated.\n\nTesting approach:\n- Write a script to verify that all source files contain the new license header\n- Validate package.json and other metadata files have the correct license field\n- Ensure any build processes that depend on license information still function correctly\n- Run a sample build to confirm license information is properly included in any generated artifacts",
          "status": "done",
          "parentTaskId": 39
        },
        {
          "id": 3,
          "title": "Update Documentation and Create License Explanation",
          "description": "Update project documentation to clearly explain the dual license structure and create comprehensive licensing guidance.",
          "dependencies": [
            1,
            2
          ],
          "details": "Implementation steps:\n1. Update the README.md with a clear, concise explanation of the licensing terms:\n - Summary of what users can and cannot do with the code\n - Who holds commercial rights (Ralph & Eyal)\n - How to obtain commercial use permission\n - Links to the full license texts\n2. Create a dedicated LICENSING.md or similar document with detailed explanations of:\n - The rationale behind the dual licensing approach\n - Detailed examples of what constitutes commercial vs. non-commercial use\n - FAQs addressing common licensing questions\n3. Update any other documentation references to licensing throughout the project.\n4. Create visual aids (if appropriate) to help users understand the licensing structure.\n5. Ensure all documentation links to licensing information are updated.\n\nTesting approach:\n- Have non-technical stakeholders review the documentation for clarity and understanding\n- Verify all links to license files work correctly\n- Ensure the explanation is comprehensive but concise enough for users to understand quickly\n- Check that the documentation correctly addresses the most common use cases and questions",
          "status": "done",
          "parentTaskId": 39
        }
      ]
    },
    {
      "id": 40,
      "title": "Implement Project Funding Documentation and Support Infrastructure",
      "description": "Create FUNDING.yml for GitHub Sponsors integration that outlines all financial support options for the Task Master project.",
      "status": "in-progress",
      "dependencies": [],
      "priority": "medium",
      "details": "This task involves creating a FUNDING.yml file to enable and manage funding options for the Task Master project:\n\n**FUNDING.yml file**:\n - Create a .github/FUNDING.yml file following GitHub's specifications\n - Include configuration for multiple funding platforms:\n - GitHub Sponsors (primary if available)\n - Open Collective\n - Patreon\n - Ko-fi\n - Liberapay\n - Custom funding URLs (project website donation page)\n - Research and reference successful implementation patterns from Vue.js, React, and TypeScript projects\n - Ensure the FUNDING.yml contains sufficient information to guide users on how to support the project\n - Include comments within the YAML file to provide context for each funding option\n\nThe implementation should maintain consistent branding and messaging with the rest of the Task Master project. Research at least 5 successful open source projects to identify best practices in funding configuration.",
      "testStrategy": "Testing should verify the technical implementation of the FUNDING.yml file:\n\n1. **FUNDING.yml validation**:\n - Verify the file is correctly placed in the .github directory\n - Validate YAML syntax using a linter\n - Test that GitHub correctly displays funding options on the repository page\n - Verify all links to external funding platforms are functional\n\n2. **User experience testing**:\n - Test the complete funding workflow from a potential supporter's perspective\n - Verify the process is intuitive and barriers to contribution are minimized\n - Check that the Sponsor button appears correctly on GitHub\n - Ensure all funding platform links resolve to the correct destinations\n - Gather feedback from 2-3 potential users on clarity and ease of use",
      "subtasks": [
        {
          "id": 1,
          "title": "Research and Create FUNDING.yml File",
          "description": "Research successful funding configurations and create the .github/FUNDING.yml file for GitHub Sponsors integration and other funding platforms.",
          "dependencies": [],
          "details": "Implementation steps:\n1. Create the .github directory at the project root if it doesn't exist\n2. Research funding configurations from 5 successful open source projects (Vue.js, React, TypeScript, etc.)\n3. Document the patterns and approaches used in these projects\n4. Create the FUNDING.yml file with the following platforms:\n - GitHub Sponsors (primary)\n - Open Collective\n - Patreon\n - Ko-fi\n - Liberapay\n - Custom donation URL for the project website\n5. Validate the YAML syntax using a linter\n6. Test the file by pushing to a test branch and verifying the Sponsor button appears correctly on GitHub\n\nTesting approach:\n- Validate YAML syntax using yamllint or similar tool\n- Test on GitHub by checking if the Sponsor button appears in the repository\n- Verify each funding link resolves to the correct destination",
          "status": "done",
          "parentTaskId": 40
        },
        {
          "id": 4,
          "title": "Add Documentation Comments to FUNDING.yml",
          "description": "Add comprehensive comments within the FUNDING.yml file to provide context and guidance for each funding option.",
          "dependencies": [
            1
          ],
          "details": "Implementation steps:\n1. Add a header comment explaining the purpose of the file\n2. For each funding platform entry, add comments that explain:\n - What the platform is\n - How funds are processed on this platform\n - Any specific benefits of using this platform\n - Brief instructions for potential sponsors\n3. Include a comment about how sponsors will be acknowledged\n4. Add information about fund allocation (maintenance, new features, infrastructure)\n5. Ensure comments follow YAML comment syntax and don't break the file structure\n\nTesting approach:\n- Validate that the YAML file still passes linting with comments added\n- Verify the file still functions correctly on GitHub\n- Have at least one team member review the comments for clarity and completeness",
          "status": "pending",
          "parentTaskId": 40
        },
        {
          "id": 5,
          "title": "Integrate Funding Information in Project README",
          "description": "Add a section to the project README that highlights the funding options and directs users to the Sponsor button.",
          "dependencies": [
            1,
            4
          ],
          "details": "Implementation steps:\n1. Create a 'Support the Project' or 'Sponsorship' section in the README.md\n2. Explain briefly why financial support matters for the project\n3. Direct users to the GitHub Sponsor button\n4. Mention the alternative funding platforms available\n5. Include a brief note on how funds will be used\n6. Add any relevant funding badges (e.g., Open Collective, GitHub Sponsors)\n\nTesting approach:\n- Review the README section for clarity and conciseness\n- Verify all links work correctly\n- Ensure the section is appropriately visible but doesn't overshadow project information\n- Check that badges render correctly",
          "status": "pending",
          "parentTaskId": 40
        }
      ]
    },
|
||||
    {
      "id": 41,
      "title": "Implement GitHub Actions CI Workflow for Cross-Platform Testing",
      "description": "Create a CI workflow file (ci.yml) that tests the codebase across multiple Node.js versions and operating systems using GitHub Actions.",
      "status": "pending",
      "dependencies": [],
      "priority": "high",
      "details": "Create a GitHub Actions workflow file at `.github/workflows/ci.yml` with the following specifications:\n\n1. Configure the workflow to trigger on:\n - Push events to any branch\n - Pull request events targeting any branch\n\n2. Implement a matrix strategy that tests across:\n - Node.js versions: 18.x, 20.x, and 22.x\n - Operating systems: Ubuntu-latest and Windows-latest\n\n3. Include proper Git configuration steps:\n - Set Git user name to 'GitHub Actions'\n - Set Git email to 'github-actions@github.com'\n\n4. Configure workflow steps to:\n - Checkout the repository using actions/checkout@v3\n - Set up Node.js using actions/setup-node@v3 with the matrix version\n - Use npm for package management (not pnpm)\n - Install dependencies with 'npm ci'\n - Run linting with 'npm run lint' (if available)\n - Run tests with 'npm test'\n - Run build process with 'npm run build'\n\n5. Implement concurrency controls to:\n - Cancel in-progress workflows when new commits are pushed to the same PR\n - Use a concurrency group based on the GitHub ref and workflow name\n\n6. Add proper caching for npm dependencies to speed up workflow runs\n\n7. Ensure the workflow includes appropriate timeouts to prevent hung jobs",
      "testStrategy": "To verify correct implementation of the GitHub Actions CI workflow:\n\n1. Manual verification:\n - Check that the file is correctly placed at `.github/workflows/ci.yml`\n - Verify the YAML syntax is valid using a YAML linter\n - Confirm all required configurations (triggers, matrix, steps) are present\n\n2. Functional testing:\n - Push a commit to a feature branch to confirm the workflow triggers\n - Create a PR to verify the workflow runs on pull requests\n - Verify the workflow successfully runs on both Ubuntu and Windows\n - Confirm tests run against all three Node.js versions (18, 20, 22)\n - Test concurrency by pushing multiple commits to the same PR rapidly\n\n3. Edge case testing:\n - Introduce a failing test and verify the workflow reports failure\n - Test with a large dependency tree to verify caching works correctly\n - Verify the workflow handles non-ASCII characters in file paths correctly (particularly on Windows)\n\n4. Check workflow logs to ensure:\n - Git configuration is applied correctly\n - Dependencies are installed with npm (not pnpm)\n - All matrix combinations run independently\n - Concurrency controls cancel redundant workflow runs",
      "subtasks": [
        {
          "id": 1,
          "title": "Create Basic GitHub Actions Workflow Structure",
          "description": "Set up the foundational GitHub Actions workflow file with triggers, checkout, and Node.js setup using matrix strategy",
          "dependencies": [],
          "details": "1. Create `.github/workflows/` directory if it doesn't exist\n2. Create a new file `ci.yml` inside this directory\n3. Define the workflow name at the top of the file\n4. Configure triggers for push events to any branch and pull request events targeting any branch\n5. Set up the matrix strategy for Node.js versions (18.x, 20.x, 22.x) and operating systems (Ubuntu-latest, Windows-latest)\n6. Configure the job to checkout the repository using actions/checkout@v3\n7. Set up Node.js using actions/setup-node@v3 with the matrix version\n8. Add proper caching for npm dependencies\n9. Test the workflow by pushing the file to a test branch and verifying it triggers correctly\n10. Verify that the matrix builds are running on all specified Node versions and operating systems",
          "status": "pending",
          "parentTaskId": 41
        },
        {
          "id": 2,
          "title": "Implement Build and Test Steps with Git Configuration",
          "description": "Add the core build and test steps to the workflow, including Git configuration, dependency installation, and execution of lint, test, and build commands",
          "dependencies": [
            1
          ],
          "details": "1. Add Git configuration steps to set user name to 'GitHub Actions' and email to 'github-actions@github.com'\n2. Add step to install dependencies with 'npm ci'\n3. Add conditional step to run linting with 'npm run lint' if available\n4. Add step to run tests with 'npm test'\n5. Add step to run build process with 'npm run build'\n6. Ensure each step has appropriate names for clear visibility in GitHub Actions UI\n7. Add appropriate error handling and continue-on-error settings where needed\n8. Test the workflow by pushing a change and verifying all build steps execute correctly\n9. Verify that the workflow correctly runs on both Ubuntu and Windows environments\n10. Ensure that all commands use the correct syntax for cross-platform compatibility",
          "status": "pending",
          "parentTaskId": 41
        },
        {
          "id": 3,
          "title": "Add Workflow Optimization Features",
          "description": "Implement concurrency controls, timeouts, and other optimization features to improve workflow efficiency and reliability",
          "dependencies": [
            1,
            2
          ],
          "details": "1. Implement concurrency controls to cancel in-progress workflows when new commits are pushed to the same PR\n2. Define a concurrency group based on the GitHub ref and workflow name\n3. Add appropriate timeouts to prevent hung jobs (typically 30-60 minutes depending on project complexity)\n4. Add status badges to the README.md file to show build status\n5. Optimize the workflow by adding appropriate 'if' conditions to skip unnecessary steps\n6. Add job summary outputs to provide clear information about the build results\n7. Test the concurrency feature by pushing multiple commits in quick succession to a PR\n8. Verify that old workflow runs are canceled when new commits are pushed\n9. Test timeout functionality by temporarily adding a long-running step\n10. Document the CI workflow in project documentation, explaining what it does and how to troubleshoot common issues",
          "status": "pending",
          "parentTaskId": 41
        }
      ]
    }
  ]
}
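Task 38 above is easiest to follow with the mechanism spelled out in code. The sketch below is a minimal, hypothetical illustration of the flow the task describes (registry lookup, semver-style comparison, once-per-day cache); the cache location, cache shape, and function names are assumptions for illustration, not the shipped implementation, and the task itself suggests the `semver` package for comparisons that must handle pre-releases.

```js
// Hypothetical sketch of the update-check flow described in task 38.
// Assumes Node 18+ (global fetch); cache path and layout are illustrative.
import fs from 'fs';
import os from 'os';
import path from 'path';

const CACHE_FILE = path.join(os.tmpdir(), 'task-master-update-check.json'); // assumed location
const ONE_DAY_MS = 24 * 60 * 60 * 1000;

// Naive x.y.z comparison; real code should prefer the 'semver' package.
function compareSemver(a, b) {
  const pa = a.split('.').map(Number);
  const pb = b.split('.').map(Number);
  for (let i = 0; i < 3; i++) {
    if ((pa[i] || 0) !== (pb[i] || 0)) return (pa[i] || 0) < (pb[i] || 0) ? -1 : 1;
  }
  return 0;
}

export async function checkForUpdateOncePerDay(currentVersion) {
  // Respect the cooldown: skip the network call if we checked recently.
  try {
    const cache = JSON.parse(fs.readFileSync(CACHE_FILE, 'utf8'));
    if (Date.now() - cache.checkedAt < ONE_DAY_MS) {
      return {
        latestVersion: cache.latestVersion,
        needsUpdate: compareSemver(currentVersion, cache.latestVersion) < 0
      };
    }
  } catch {
    // No cache yet, or it is unreadable; fall through to a fresh check.
  }

  try {
    const res = await fetch('https://registry.npmjs.org/task-master-ai/latest');
    const { version: latestVersion } = await res.json();
    fs.writeFileSync(CACHE_FILE, JSON.stringify({ checkedAt: Date.now(), latestVersion }));
    return { latestVersion, needsUpdate: compareSemver(currentVersion, latestVersion) < 0 };
  } catch {
    // Never block command execution on a failed version check.
    return { latestVersion: currentVersion, needsUpdate: false };
  }
}
```

The two test scripts added in this diff (below) exercise exactly this comparison logic, with the HTTP step stubbed out.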
83
test-version-check-full.js
Normal file
@@ -0,0 +1,83 @@
import {
  checkForUpdate,
  displayUpgradeNotification,
  compareVersions
} from './scripts/modules/commands.js';
import fs from 'fs';
import path from 'path';

// Force our current version for testing
process.env.FORCE_VERSION = '0.9.30';

// Create a mock package.json in memory for testing
const mockPackageJson = {
  name: 'task-master-ai',
  version: '0.9.30'
};

// Modified version of checkForUpdate that doesn't use HTTP for testing
async function testCheckForUpdate(simulatedLatestVersion) {
  // Get current version - use our forced version
  const currentVersion = process.env.FORCE_VERSION || '0.9.30';

  console.log(`Using simulated current version: ${currentVersion}`);
  console.log(`Using simulated latest version: ${simulatedLatestVersion}`);

  // Compare versions
  const needsUpdate =
    compareVersions(currentVersion, simulatedLatestVersion) < 0;

  return {
    currentVersion,
    latestVersion: simulatedLatestVersion,
    needsUpdate
  };
}

// Test with current version older than latest (should show update notice)
async function runTest() {
  console.log('=== Testing version check scenarios ===\n');

  // Scenario 1: Update available
  console.log(
    '\n--- Scenario 1: Update available (Current: 0.9.30, Latest: 1.0.0) ---'
  );
  const updateInfo1 = await testCheckForUpdate('1.0.0');
  console.log('Update check results:');
  console.log(`- Current version: ${updateInfo1.currentVersion}`);
  console.log(`- Latest version: ${updateInfo1.latestVersion}`);
  console.log(`- Update needed: ${updateInfo1.needsUpdate}`);

  if (updateInfo1.needsUpdate) {
    console.log('\nDisplaying upgrade notification:');
    displayUpgradeNotification(
      updateInfo1.currentVersion,
      updateInfo1.latestVersion
    );
  }

  // Scenario 2: No update needed (versions equal)
  console.log(
    '\n--- Scenario 2: No update needed (Current: 0.9.30, Latest: 0.9.30) ---'
  );
  const updateInfo2 = await testCheckForUpdate('0.9.30');
  console.log('Update check results:');
  console.log(`- Current version: ${updateInfo2.currentVersion}`);
  console.log(`- Latest version: ${updateInfo2.latestVersion}`);
  console.log(`- Update needed: ${updateInfo2.needsUpdate}`);

  // Scenario 3: Development version (current newer than latest)
  console.log(
    '\n--- Scenario 3: Development version (Current: 0.9.30, Latest: 0.9.0) ---'
  );
  const updateInfo3 = await testCheckForUpdate('0.9.0');
  console.log('Update check results:');
  console.log(`- Current version: ${updateInfo3.currentVersion}`);
  console.log(`- Latest version: ${updateInfo3.latestVersion}`);
  console.log(`- Update needed: ${updateInfo3.needsUpdate}`);

  console.log('\n=== Test complete ===');
}

// Run all tests
runTest();
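For reference, the notification these scripts trigger via `displayUpgradeNotification` can be rendered with `chalk` and `boxen`. The snippet below is a hedged stand-in showing the general output shape, not the actual implementation in scripts/modules/commands.js; it assumes both packages are available.

```js
// Illustrative stand-in for displayUpgradeNotification (assumes chalk + boxen).
import chalk from 'chalk';
import boxen from 'boxen';

function showUpgradeNotice(currentVersion, latestVersion) {
  const message =
    `Update available: ${chalk.red(currentVersion)} -> ${chalk.green(latestVersion)}\n` +
    `Run ${chalk.cyan('npm i task-master-ai@latest')} to update`;
  // A bordered box keeps the notice separate from normal command output.
  console.log(boxen(message, { padding: 1, margin: 1, borderColor: 'yellow' }));
}

showUpgradeNotice('0.9.30', '1.0.0');
```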
35
test-version-check.js
Normal file
@@ -0,0 +1,35 @@
import {
  displayUpgradeNotification,
  compareVersions
} from './scripts/modules/commands.js';

// Simulate different version scenarios
console.log('=== Simulating version check ===\n');

// 1. Current version is older than latest (should show update notice)
console.log('Scenario 1: Current version older than latest');
displayUpgradeNotification('0.9.30', '1.0.0');

// 2. Current version same as latest (no update needed)
console.log(
  '\nScenario 2: Current version same as latest (this would not normally show a notice)'
);
console.log('Current: 1.0.0, Latest: 1.0.0');
console.log('compareVersions result:', compareVersions('1.0.0', '1.0.0'));
console.log(
  'Update needed:',
  compareVersions('1.0.0', '1.0.0') < 0 ? 'Yes' : 'No'
);

// 3. Current version newer than latest (e.g., development version, would not show notice)
console.log(
  '\nScenario 3: Current version newer than latest (this would not normally show a notice)'
);
console.log('Current: 1.1.0, Latest: 1.0.0');
console.log('compareVersions result:', compareVersions('1.1.0', '1.0.0'));
console.log(
  'Update needed:',
  compareVersions('1.1.0', '1.0.0') < 0 ? 'Yes' : 'No'
);

console.log('\n=== Test complete ===');
@@ -60,4 +60,4 @@ We aim for at least 80% test coverage for all code paths. Coverage reports can b

```bash
npm run test:coverage
```

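The 80% target mentioned in the hunk above can be enforced rather than merely reported. A minimal sketch, assuming Jest is the runner and an ESM setup; the exact threshold values and config filename here are illustrative:

```js
// jest.config.js (sketch) - make `npm run test:coverage` fail below 80%.
export default {
  collectCoverage: true,
  coverageThreshold: {
    global: { branches: 80, functions: 80, lines: 80, statements: 80 }
  }
};
```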
86
tests/fixtures/sample-claude-response.js
vendored
@@ -3,42 +3,50 @@
 */

export const sampleClaudeResponse = {
  tasks: [
    {
      id: 1,
      title: "Setup Task Data Structure",
      description: "Implement the core task data structure and file operations",
      status: "pending",
      dependencies: [],
      priority: "high",
      details: "Create the tasks.json file structure with support for task properties including ID, title, description, status, dependencies, priority, details, and test strategy. Implement file system operations for reading and writing task data.",
      testStrategy: "Verify tasks.json is created with the correct structure and that task data can be read from and written to the file."
    },
    {
      id: 2,
      title: "Implement CLI Foundation",
      description: "Create the command-line interface foundation with basic commands",
      status: "pending",
      dependencies: [1],
      priority: "high",
      details: "Set up Commander.js for handling CLI commands. Implement the basic command structure including help documentation. Create the foundational command parsing logic.",
      testStrategy: "Test each command to ensure it properly parses arguments and options. Verify help documentation is displayed correctly."
    },
    {
      id: 3,
      title: "Develop Task Management Operations",
      description: "Implement core operations for creating, reading, updating, and deleting tasks",
      status: "pending",
      dependencies: [1],
      priority: "medium",
      details: "Implement functions for listing tasks, adding new tasks, updating task status, and removing tasks. Include support for filtering tasks by status and other properties.",
      testStrategy: "Create unit tests for each CRUD operation to verify they correctly modify the task data."
    }
  ],
  metadata: {
    projectName: "Task Management CLI",
    totalTasks: 3,
    sourceFile: "tests/fixtures/sample-prd.txt",
    generatedAt: "2023-12-15"
  }
};
  tasks: [
    {
      id: 1,
      title: 'Setup Task Data Structure',
      description: 'Implement the core task data structure and file operations',
      status: 'pending',
      dependencies: [],
      priority: 'high',
      details:
        'Create the tasks.json file structure with support for task properties including ID, title, description, status, dependencies, priority, details, and test strategy. Implement file system operations for reading and writing task data.',
      testStrategy:
        'Verify tasks.json is created with the correct structure and that task data can be read from and written to the file.'
    },
    {
      id: 2,
      title: 'Implement CLI Foundation',
      description:
        'Create the command-line interface foundation with basic commands',
      status: 'pending',
      dependencies: [1],
      priority: 'high',
      details:
        'Set up Commander.js for handling CLI commands. Implement the basic command structure including help documentation. Create the foundational command parsing logic.',
      testStrategy:
        'Test each command to ensure it properly parses arguments and options. Verify help documentation is displayed correctly.'
    },
    {
      id: 3,
      title: 'Develop Task Management Operations',
      description:
        'Implement core operations for creating, reading, updating, and deleting tasks',
      status: 'pending',
      dependencies: [1],
      priority: 'medium',
      details:
        'Implement functions for listing tasks, adding new tasks, updating task status, and removing tasks. Include support for filtering tasks by status and other properties.',
      testStrategy:
        'Create unit tests for each CRUD operation to verify they correctly modify the task data.'
    }
  ],
  metadata: {
    projectName: 'Task Management CLI',
    totalTasks: 3,
    sourceFile: 'tests/fixtures/sample-prd.txt',
    generatedAt: '2023-12-15'
  }
};

130
tests/fixtures/sample-tasks.js
vendored
130
tests/fixtures/sample-tasks.js
vendored
@@ -3,70 +3,72 @@
 */

export const sampleTasks = {
	meta: {
		projectName: "Test Project",
		projectVersion: "1.0.0",
		createdAt: "2023-01-01T00:00:00.000Z",
		updatedAt: "2023-01-01T00:00:00.000Z"
	},
	tasks: [
		{
			id: 1,
			title: "Initialize Project",
			description: "Set up the project structure and dependencies",
			status: "done",
			dependencies: [],
			priority: "high",
			details: "Create directory structure, initialize package.json, and install dependencies",
			testStrategy: "Verify all directories and files are created correctly"
		},
		{
			id: 2,
			title: "Create Core Functionality",
			description: "Implement the main features of the application",
			status: "in-progress",
			dependencies: [1],
			priority: "high",
			details: "Implement user authentication, data processing, and API endpoints",
			testStrategy: "Write unit tests for all core functions"
		},
		{
			id: 3,
			title: "Implement UI Components",
			description: "Create the user interface components",
			status: "pending",
			dependencies: [2],
			priority: "medium",
			details: "Design and implement React components for the user interface",
			testStrategy: "Test components with React Testing Library",
			subtasks: [
				{
					id: 1,
					title: "Create Header Component",
					description: "Implement the header component",
					status: "pending",
					dependencies: [],
					details: "Create a responsive header with navigation links"
				},
				{
					id: 2,
					title: "Create Footer Component",
					description: "Implement the footer component",
					status: "pending",
					dependencies: [],
					details: "Create a footer with copyright information and links"
				}
			]
		}
	]
	meta: {
		projectName: 'Test Project',
		projectVersion: '1.0.0',
		createdAt: '2023-01-01T00:00:00.000Z',
		updatedAt: '2023-01-01T00:00:00.000Z'
	},
	tasks: [
		{
			id: 1,
			title: 'Initialize Project',
			description: 'Set up the project structure and dependencies',
			status: 'done',
			dependencies: [],
			priority: 'high',
			details:
				'Create directory structure, initialize package.json, and install dependencies',
			testStrategy: 'Verify all directories and files are created correctly'
		},
		{
			id: 2,
			title: 'Create Core Functionality',
			description: 'Implement the main features of the application',
			status: 'in-progress',
			dependencies: [1],
			priority: 'high',
			details:
				'Implement user authentication, data processing, and API endpoints',
			testStrategy: 'Write unit tests for all core functions'
		},
		{
			id: 3,
			title: 'Implement UI Components',
			description: 'Create the user interface components',
			status: 'pending',
			dependencies: [2],
			priority: 'medium',
			details: 'Design and implement React components for the user interface',
			testStrategy: 'Test components with React Testing Library',
			subtasks: [
				{
					id: 1,
					title: 'Create Header Component',
					description: 'Implement the header component',
					status: 'pending',
					dependencies: [],
					details: 'Create a responsive header with navigation links'
				},
				{
					id: 2,
					title: 'Create Footer Component',
					description: 'Implement the footer component',
					status: 'pending',
					dependencies: [],
					details: 'Create a footer with copyright information and links'
				}
			]
		}
	]
};

export const emptySampleTasks = {
	meta: {
		projectName: "Empty Project",
		projectVersion: "1.0.0",
		createdAt: "2023-01-01T00:00:00.000Z",
		updatedAt: "2023-01-01T00:00:00.000Z"
	},
	tasks: []
};
	meta: {
		projectName: 'Empty Project',
		projectVersion: '1.0.0',
		createdAt: '2023-01-01T00:00:00.000Z',
		updatedAt: '2023-01-01T00:00:00.000Z'
	},
	tasks: []
};

191 tests/integration/mcp-server/direct-functions.test.js Normal file
@@ -0,0 +1,191 @@
/**
 * Integration test for direct function imports in MCP server
 */

import { jest } from '@jest/globals';
import path from 'path';
import fs from 'fs';
import { fileURLToPath } from 'url';
import { dirname } from 'path';

// Get the current module's directory
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

// Import the direct functions
import { listTasksDirect } from '../../../mcp-server/src/core/task-master-core.js';

// Mock logger
const mockLogger = {
	info: jest.fn(),
	error: jest.fn(),
	debug: jest.fn(),
	warn: jest.fn()
};

// Test file paths
const testProjectRoot = path.join(__dirname, '../../fixture');
const testTasksPath = path.join(testProjectRoot, 'test-tasks.json');

describe('MCP Server Direct Functions', () => {
	// Create test data before tests
	beforeAll(() => {
		// Create test directory if it doesn't exist
		if (!fs.existsSync(testProjectRoot)) {
			fs.mkdirSync(testProjectRoot, { recursive: true });
		}

		// Create a sample tasks.json file for testing
		const sampleTasks = {
			meta: {
				projectName: 'Test Project',
				version: '1.0.0'
			},
			tasks: [
				{
					id: 1,
					title: 'Task 1',
					description: 'First task',
					status: 'done',
					dependencies: [],
					priority: 'high'
				},
				{
					id: 2,
					title: 'Task 2',
					description: 'Second task',
					status: 'in-progress',
					dependencies: [1],
					priority: 'medium',
					subtasks: [
						{
							id: 1,
							title: 'Subtask 2.1',
							description: 'First subtask',
							status: 'done'
						},
						{
							id: 2,
							title: 'Subtask 2.2',
							description: 'Second subtask',
							status: 'pending'
						}
					]
				},
				{
					id: 3,
					title: 'Task 3',
					description: 'Third task',
					status: 'pending',
					dependencies: [1, 2],
					priority: 'low'
				}
			]
		};

		fs.writeFileSync(testTasksPath, JSON.stringify(sampleTasks, null, 2));
	});

	// Clean up after tests
	afterAll(() => {
		// Remove test tasks file
		if (fs.existsSync(testTasksPath)) {
			fs.unlinkSync(testTasksPath);
		}

		// Try to remove the directory (will only work if empty)
		try {
			fs.rmdirSync(testProjectRoot);
		} catch (error) {
			// Ignore errors if the directory isn't empty
		}
	});

	// Reset mocks before each test
	beforeEach(() => {
		jest.clearAllMocks();
	});

	describe('listTasksDirect', () => {
		test('should return all tasks when no filter is provided', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath
			};

			// Act
			const result = await listTasksDirect(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.tasks.length).toBe(3);
			expect(result.data.stats.total).toBe(3);
			expect(result.data.stats.completed).toBe(1);
			expect(result.data.stats.inProgress).toBe(1);
			expect(result.data.stats.pending).toBe(1);
			expect(mockLogger.info).toHaveBeenCalled();
		});

		test('should filter tasks by status', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				status: 'pending'
			};

			// Act
			const result = await listTasksDirect(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);
			expect(result.data.tasks.length).toBe(1);
			expect(result.data.tasks[0].id).toBe(3);
			expect(result.data.filter).toBe('pending');
		});

		test('should include subtasks when requested', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				withSubtasks: true
			};

			// Act
			const result = await listTasksDirect(args, mockLogger);

			// Assert
			expect(result.success).toBe(true);

			// Verify subtasks are included
			const taskWithSubtasks = result.data.tasks.find((t) => t.id === 2);
			expect(taskWithSubtasks.subtasks).toBeDefined();
			expect(taskWithSubtasks.subtasks.length).toBe(2);

			// Verify subtask details
			expect(taskWithSubtasks.subtasks[0].id).toBe(1);
			expect(taskWithSubtasks.subtasks[0].title).toBe('Subtask 2.1');
			expect(taskWithSubtasks.subtasks[0].status).toBe('done');
		});

		test('should handle errors gracefully', async () => {
			// Arrange
			const args = {
				projectRoot: testProjectRoot,
				file: 'non-existent-file.json'
			};

			// Act
			const result = await listTasksDirect(args, mockLogger);

			// Assert
			expect(result.success).toBe(false);
			expect(result.error).toBeDefined();
			expect(result.error.code).toBeDefined();
			expect(result.error.message).toBeDefined();
			expect(mockLogger.error).toHaveBeenCalled();
		});
	});
});
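For readers skimming the assertions above, the result envelope that `listTasksDirect` appears to return looks roughly like this. This is a sketch inferred from the test expectations, not the function's documented contract, and the concrete values are illustrative:

```javascript
// Inferred from the assertions above; illustrative only.
const successShape = {
	success: true,
	data: {
		tasks: [], // task objects, with subtasks included when withSubtasks is set
		filter: 'pending', // present when a status filter was applied
		stats: { total: 3, completed: 1, inProgress: 1, pending: 1 }
	}
};
const failureShape = {
	success: false,
	error: { code: 'SOME_CODE', message: 'Human-readable message' } // code value is hypothetical
};
```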
@@ -1,6 +1,6 @@
/**
 * Jest setup file
 *
 *
 * This file is run before each test suite to set up the test environment.
 */

@@ -16,15 +16,15 @@ process.env.PROJECT_NAME = 'Test Project';
process.env.PROJECT_VERSION = '1.0.0';

// Add global test helpers if needed
global.wait = (ms) => new Promise(resolve => setTimeout(resolve, ms));
global.wait = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

// If needed, silence console during tests
if (process.env.SILENCE_CONSOLE === 'true') {
	global.console = {
		...console,
		log: jest.fn(),
		info: jest.fn(),
		warn: jest.fn(),
		error: jest.fn(),
	};
}
	global.console = {
		...console,
		log: jest.fn(),
		info: jest.fn(),
		warn: jest.fn(),
		error: jest.fn()
	};
}

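The `wait` helper registered above gives every test a promise-based pause. A minimal usage sketch (the test body is hypothetical, not taken from the repository):

```javascript
// Hypothetical usage of the global wait() helper defined in the setup file
test('waits before asserting', async () => {
	const start = Date.now();
	await wait(50); // resolves after ~50 ms without blocking the event loop
	expect(Date.now() - start).toBeGreaterThanOrEqual(50);
});
```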
@@ -10,62 +10,68 @@ const mockLog = jest.fn();

// Mock dependencies
jest.mock('@anthropic-ai/sdk', () => {
	const mockCreate = jest.fn().mockResolvedValue({
		content: [{ text: 'AI response' }],
	});
	const mockAnthropicInstance = {
		messages: {
			create: mockCreate
		}
	};
	const mockAnthropicConstructor = jest.fn().mockImplementation(() => mockAnthropicInstance);
	return {
		Anthropic: mockAnthropicConstructor
	};
	const mockCreate = jest.fn().mockResolvedValue({
		content: [{ text: 'AI response' }]
	});
	const mockAnthropicInstance = {
		messages: {
			create: mockCreate
		}
	};
	const mockAnthropicConstructor = jest
		.fn()
		.mockImplementation(() => mockAnthropicInstance);
	return {
		Anthropic: mockAnthropicConstructor
	};
});

// Use jest.fn() directly for OpenAI mock
const mockOpenAIInstance = {
	chat: {
		completions: {
			create: jest.fn().mockResolvedValue({
				choices: [{ message: { content: 'Perplexity response' } }],
			}),
		},
	},
	chat: {
		completions: {
			create: jest.fn().mockResolvedValue({
				choices: [{ message: { content: 'Perplexity response' } }]
			})
		}
	}
};
const mockOpenAI = jest.fn().mockImplementation(() => mockOpenAIInstance);

jest.mock('openai', () => {
	return { default: mockOpenAI };
	return { default: mockOpenAI };
});

jest.mock('dotenv', () => ({
	config: jest.fn(),
	config: jest.fn()
}));

jest.mock('../../scripts/modules/utils.js', () => ({
	CONFIG: {
		model: 'claude-3-sonnet-20240229',
		temperature: 0.7,
		maxTokens: 4000,
	},
	log: mockLog,
	sanitizePrompt: jest.fn(text => text),
	CONFIG: {
		model: 'claude-3-sonnet-20240229',
		temperature: 0.7,
		maxTokens: 4000
	},
	log: mockLog,
	sanitizePrompt: jest.fn((text) => text)
}));

jest.mock('../../scripts/modules/ui.js', () => ({
	startLoadingIndicator: jest.fn().mockReturnValue('mockLoader'),
	stopLoadingIndicator: jest.fn(),
	startLoadingIndicator: jest.fn().mockReturnValue('mockLoader'),
	stopLoadingIndicator: jest.fn()
}));

// Mock anthropic global object
global.anthropic = {
	messages: {
		create: jest.fn().mockResolvedValue({
			content: [{ text: '[{"id": 1, "title": "Test", "description": "Test", "dependencies": [], "details": "Test"}]' }],
		}),
	},
	messages: {
		create: jest.fn().mockResolvedValue({
			content: [
				{
					text: '[{"id": 1, "title": "Test", "description": "Test", "dependencies": [], "details": "Test"}]'
				}
			]
		})
	}
};

// Mock process.env
@@ -75,20 +81,20 @@ const originalEnv = process.env;
import { Anthropic } from '@anthropic-ai/sdk';

describe('AI Services Module', () => {
	beforeEach(() => {
		jest.clearAllMocks();
		process.env = { ...originalEnv };
		process.env.ANTHROPIC_API_KEY = 'test-anthropic-key';
		process.env.PERPLEXITY_API_KEY = 'test-perplexity-key';
	});
	beforeEach(() => {
		jest.clearAllMocks();
		process.env = { ...originalEnv };
		process.env.ANTHROPIC_API_KEY = 'test-anthropic-key';
		process.env.PERPLEXITY_API_KEY = 'test-perplexity-key';
	});

	afterEach(() => {
		process.env = originalEnv;
	});
	afterEach(() => {
		process.env = originalEnv;
	});

	describe('parseSubtasksFromText function', () => {
		test('should parse subtasks from JSON text', () => {
			const text = `Here's your list of subtasks:
	describe('parseSubtasksFromText function', () => {
		test('should parse subtasks from JSON text', () => {
			const text = `Here's your list of subtasks:

[
  {
@@ -109,31 +115,31 @@ describe('AI Services Module', () => {

These subtasks will help you implement the parent task efficiently.`;

			const result = parseSubtasksFromText(text, 1, 2, 5);

			expect(result).toHaveLength(2);
			expect(result[0]).toEqual({
				id: 1,
				title: 'Implement database schema',
				description: 'Design and implement the database schema for user data',
				status: 'pending',
				dependencies: [],
				details: 'Create tables for users, preferences, and settings',
				parentTaskId: 5
			});
			expect(result[1]).toEqual({
				id: 2,
				title: 'Create API endpoints',
				description: 'Develop RESTful API endpoints for user operations',
				status: 'pending',
				dependencies: [],
				details: 'Implement CRUD operations for user management',
				parentTaskId: 5
			});
		});
			const result = parseSubtasksFromText(text, 1, 2, 5);

		test('should handle subtasks with dependencies', () => {
			const text = `
			expect(result).toHaveLength(2);
			expect(result[0]).toEqual({
				id: 1,
				title: 'Implement database schema',
				description: 'Design and implement the database schema for user data',
				status: 'pending',
				dependencies: [],
				details: 'Create tables for users, preferences, and settings',
				parentTaskId: 5
			});
			expect(result[1]).toEqual({
				id: 2,
				title: 'Create API endpoints',
				description: 'Develop RESTful API endpoints for user operations',
				status: 'pending',
				dependencies: [],
				details: 'Implement CRUD operations for user management',
				parentTaskId: 5
			});
		});

		test('should handle subtasks with dependencies', () => {
			const text = `
[
  {
    "id": 1,
@@ -151,15 +157,15 @@ These subtasks will help you implement the parent task efficiently.`;
  }
]`;

			const result = parseSubtasksFromText(text, 1, 2, 5);

			expect(result).toHaveLength(2);
			expect(result[0].dependencies).toEqual([]);
			expect(result[1].dependencies).toEqual([1]);
		});
			const result = parseSubtasksFromText(text, 1, 2, 5);

		test('should handle complex dependency lists', () => {
			const text = `
			expect(result).toHaveLength(2);
			expect(result[0].dependencies).toEqual([]);
			expect(result[1].dependencies).toEqual([1]);
		});

		test('should handle complex dependency lists', () => {
			const text = `
[
  {
    "id": 1,
@@ -184,39 +190,39 @@ These subtasks will help you implement the parent task efficiently.`;
  }
]`;

			const result = parseSubtasksFromText(text, 1, 3, 5);

			expect(result).toHaveLength(3);
			expect(result[2].dependencies).toEqual([1, 2]);
		});
			const result = parseSubtasksFromText(text, 1, 3, 5);

		test('should create fallback subtasks for empty text', () => {
			const emptyText = '';

			const result = parseSubtasksFromText(emptyText, 1, 2, 5);

			// Verify fallback subtasks structure
			expect(result).toHaveLength(2);
			expect(result[0]).toMatchObject({
				id: 1,
				title: 'Subtask 1',
				description: 'Auto-generated fallback subtask',
				status: 'pending',
				dependencies: [],
				parentTaskId: 5
			});
			expect(result[1]).toMatchObject({
				id: 2,
				title: 'Subtask 2',
				description: 'Auto-generated fallback subtask',
				status: 'pending',
				dependencies: [],
				parentTaskId: 5
			});
		});
			expect(result).toHaveLength(3);
			expect(result[2].dependencies).toEqual([1, 2]);
		});

		test('should normalize subtask IDs', () => {
			const text = `
		test('should create fallback subtasks for empty text', () => {
			const emptyText = '';

			const result = parseSubtasksFromText(emptyText, 1, 2, 5);

			// Verify fallback subtasks structure
			expect(result).toHaveLength(2);
			expect(result[0]).toMatchObject({
				id: 1,
				title: 'Subtask 1',
				description: 'Auto-generated fallback subtask',
				status: 'pending',
				dependencies: [],
				parentTaskId: 5
			});
			expect(result[1]).toMatchObject({
				id: 2,
				title: 'Subtask 2',
				description: 'Auto-generated fallback subtask',
				status: 'pending',
				dependencies: [],
				parentTaskId: 5
			});
		});

		test('should normalize subtask IDs', () => {
			const text = `
[
  {
    "id": 10,
@@ -234,15 +240,15 @@ These subtasks will help you implement the parent task efficiently.`;
  }
]`;

			const result = parseSubtasksFromText(text, 1, 2, 5);

			expect(result).toHaveLength(2);
			expect(result[0].id).toBe(1); // Should normalize to starting ID
			expect(result[1].id).toBe(2); // Should normalize to starting ID + 1
		});
			const result = parseSubtasksFromText(text, 1, 2, 5);

		test('should convert string dependencies to numbers', () => {
			const text = `
			expect(result).toHaveLength(2);
			expect(result[0].id).toBe(1); // Should normalize to starting ID
			expect(result[1].id).toBe(2); // Should normalize to starting ID + 1
		});

		test('should convert string dependencies to numbers', () => {
			const text = `
[
  {
    "id": 1,
@@ -260,133 +266,142 @@ These subtasks will help you implement the parent task efficiently.`;
  }
]`;

			const result = parseSubtasksFromText(text, 1, 2, 5);

			expect(result[1].dependencies).toEqual([1]);
			expect(typeof result[1].dependencies[0]).toBe('number');
		});
			const result = parseSubtasksFromText(text, 1, 2, 5);

		test('should create fallback subtasks for invalid JSON', () => {
			const text = `This is not valid JSON and cannot be parsed`;
			expect(result[1].dependencies).toEqual([1]);
			expect(typeof result[1].dependencies[0]).toBe('number');
		});

			const result = parseSubtasksFromText(text, 1, 2, 5);

			// Verify fallback subtasks structure
			expect(result).toHaveLength(2);
			expect(result[0]).toMatchObject({
				id: 1,
				title: 'Subtask 1',
				description: 'Auto-generated fallback subtask',
				status: 'pending',
				dependencies: [],
				parentTaskId: 5
			});
			expect(result[1]).toMatchObject({
				id: 2,
				title: 'Subtask 2',
				description: 'Auto-generated fallback subtask',
				status: 'pending',
				dependencies: [],
				parentTaskId: 5
			});
		});
	});
		test('should create fallback subtasks for invalid JSON', () => {
			const text = `This is not valid JSON and cannot be parsed`;

	describe('handleClaudeError function', () => {
		// Import the function directly for testing
		let handleClaudeError;

		beforeAll(async () => {
			// Dynamic import to get the actual function
			const module = await import('../../scripts/modules/ai-services.js');
			handleClaudeError = module.handleClaudeError;
		});
			const result = parseSubtasksFromText(text, 1, 2, 5);

		test('should handle overloaded_error type', () => {
			const error = {
				type: 'error',
				error: {
					type: 'overloaded_error',
					message: 'Claude is experiencing high volume'
				}
			};

			const result = handleClaudeError(error);

			expect(result).toContain('Claude is currently experiencing high demand');
			expect(result).toContain('overloaded');
		});
			// Verify fallback subtasks structure
			expect(result).toHaveLength(2);
			expect(result[0]).toMatchObject({
				id: 1,
				title: 'Subtask 1',
				description: 'Auto-generated fallback subtask',
				status: 'pending',
				dependencies: [],
				parentTaskId: 5
			});
			expect(result[1]).toMatchObject({
				id: 2,
				title: 'Subtask 2',
				description: 'Auto-generated fallback subtask',
				status: 'pending',
				dependencies: [],
				parentTaskId: 5
			});
		});
	});

		test('should handle rate_limit_error type', () => {
			const error = {
				type: 'error',
				error: {
					type: 'rate_limit_error',
					message: 'Rate limit exceeded'
				}
			};

			const result = handleClaudeError(error);

			expect(result).toContain('exceeded the rate limit');
		});
	describe('handleClaudeError function', () => {
		// Import the function directly for testing
		let handleClaudeError;

		test('should handle invalid_request_error type', () => {
			const error = {
				type: 'error',
				error: {
					type: 'invalid_request_error',
					message: 'Invalid request parameters'
				}
			};

			const result = handleClaudeError(error);

			expect(result).toContain('issue with the request format');
		});
		beforeAll(async () => {
			// Dynamic import to get the actual function
			const module = await import('../../scripts/modules/ai-services.js');
			handleClaudeError = module.handleClaudeError;
		});

		test('should handle timeout errors', () => {
			const error = {
				message: 'Request timed out after 60000ms'
			};

			const result = handleClaudeError(error);

			expect(result).toContain('timed out');
		});
		test('should handle overloaded_error type', () => {
			const error = {
				type: 'error',
				error: {
					type: 'overloaded_error',
					message: 'Claude is experiencing high volume'
				}
			};

		test('should handle network errors', () => {
			const error = {
				message: 'Network error occurred'
			};

			const result = handleClaudeError(error);

			expect(result).toContain('network error');
		});
			// Mock process.env to include PERPLEXITY_API_KEY
			const originalEnv = process.env;
			process.env = { ...originalEnv, PERPLEXITY_API_KEY: 'test-key' };

		test('should handle generic errors', () => {
			const error = {
				message: 'Something unexpected happened'
			};

			const result = handleClaudeError(error);

			expect(result).toContain('Error communicating with Claude');
			expect(result).toContain('Something unexpected happened');
		});
	});
			const result = handleClaudeError(error);

	describe('Anthropic client configuration', () => {
		test('should include output-128k beta header in client configuration', async () => {
			// Read the file content to verify the change is present
			const fs = await import('fs');
			const path = await import('path');
			const filePath = path.resolve('./scripts/modules/ai-services.js');
			const fileContent = fs.readFileSync(filePath, 'utf8');

			// Check if the beta header is in the file
			expect(fileContent).toContain("'anthropic-beta': 'output-128k-2025-02-19'");
		});
	});
});
			// Restore original env
			process.env = originalEnv;

			expect(result).toContain('Claude is currently overloaded');
			expect(result).toContain('fall back to Perplexity AI');
		});

		test('should handle rate_limit_error type', () => {
			const error = {
				type: 'error',
				error: {
					type: 'rate_limit_error',
					message: 'Rate limit exceeded'
				}
			};

			const result = handleClaudeError(error);

			expect(result).toContain('exceeded the rate limit');
		});

		test('should handle invalid_request_error type', () => {
			const error = {
				type: 'error',
				error: {
					type: 'invalid_request_error',
					message: 'Invalid request parameters'
				}
			};

			const result = handleClaudeError(error);

			expect(result).toContain('issue with the request format');
		});

		test('should handle timeout errors', () => {
			const error = {
				message: 'Request timed out after 60000ms'
			};

			const result = handleClaudeError(error);

			expect(result).toContain('timed out');
		});

		test('should handle network errors', () => {
			const error = {
				message: 'Network error occurred'
			};

			const result = handleClaudeError(error);

			expect(result).toContain('network error');
		});

		test('should handle generic errors', () => {
			const error = {
				message: 'Something unexpected happened'
			};

			const result = handleClaudeError(error);

			expect(result).toContain('Error communicating with Claude');
			expect(result).toContain('Something unexpected happened');
		});
	});

	describe('Anthropic client configuration', () => {
		test('should include output-128k beta header in client configuration', async () => {
			// Read the file content to verify the change is present
			const fs = await import('fs');
			const path = await import('path');
			const filePath = path.resolve('./scripts/modules/ai-services.js');
			const fileContent = fs.readFileSync(filePath, 'utf8');

			// Check if the beta header is in the file
			expect(fileContent).toContain(
				"'anthropic-beta': 'output-128k-2025-02-19'"
			);
		});
	});
});

@@ -6,38 +6,44 @@ import { jest } from '@jest/globals';

// Mock functions that need jest.fn methods
const mockParsePRD = jest.fn().mockResolvedValue(undefined);
const mockUpdateTaskById = jest.fn().mockResolvedValue({
	id: 2,
	title: 'Updated Task',
	description: 'Updated description'
});
const mockDisplayBanner = jest.fn();
const mockDisplayHelp = jest.fn();
const mockLog = jest.fn();

// Mock modules first
jest.mock('fs', () => ({
	existsSync: jest.fn(),
	readFileSync: jest.fn()
	existsSync: jest.fn(),
	readFileSync: jest.fn()
}));

jest.mock('path', () => ({
	join: jest.fn((dir, file) => `${dir}/${file}`)
	join: jest.fn((dir, file) => `${dir}/${file}`)
}));

jest.mock('chalk', () => ({
	red: jest.fn(text => text),
	blue: jest.fn(text => text),
	green: jest.fn(text => text),
	yellow: jest.fn(text => text),
	white: jest.fn(text => ({
		bold: jest.fn(text => text)
	})),
	reset: jest.fn(text => text)
	red: jest.fn((text) => text),
	blue: jest.fn((text) => text),
	green: jest.fn((text) => text),
	yellow: jest.fn((text) => text),
	white: jest.fn((text) => ({
		bold: jest.fn((text) => text)
	})),
	reset: jest.fn((text) => text)
}));

jest.mock('../../scripts/modules/ui.js', () => ({
	displayBanner: mockDisplayBanner,
	displayHelp: mockDisplayHelp
	displayBanner: mockDisplayBanner,
	displayHelp: mockDisplayHelp
}));

jest.mock('../../scripts/modules/task-manager.js', () => ({
	parsePRD: mockParsePRD
	parsePRD: mockParsePRD,
	updateTaskById: mockUpdateTaskById
}));

// Add this function before the mock of utils.js
@@ -47,10 +53,10 @@ jest.mock('../../scripts/modules/task-manager.js', () => ({
 * @returns {string} kebab-case version of the input
 */
const toKebabCase = (str) => {
	return str
		.replace(/([a-z0-9])([A-Z])/g, '$1-$2')
		.toLowerCase()
		.replace(/^-/, ''); // Remove leading hyphen if present
	return str
		.replace(/([a-z0-9])([A-Z])/g, '$1-$2')
		.toLowerCase()
		.replace(/^-/, ''); // Remove leading hyphen if present
};

/**
@@ -59,37 +65,37 @@ const toKebabCase = (str) => {
 * @returns {Array<{original: string, kebabCase: string}>} - List of flags that should be converted
 */
function detectCamelCaseFlags(args) {
	const camelCaseFlags = [];
	for (const arg of args) {
		if (arg.startsWith('--')) {
			const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =

			// Skip if it's a single word (no hyphens) or already in kebab-case
			if (!flagName.includes('-')) {
				// Check for camelCase pattern (lowercase followed by uppercase)
				if (/[a-z][A-Z]/.test(flagName)) {
					const kebabVersion = toKebabCase(flagName);
					if (kebabVersion !== flagName) {
						camelCaseFlags.push({
							original: flagName,
							kebabCase: kebabVersion
						});
					}
				}
			}
		}
	}
	return camelCaseFlags;
	const camelCaseFlags = [];
	for (const arg of args) {
		if (arg.startsWith('--')) {
			const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =

			// Skip if it's a single word (no hyphens) or already in kebab-case
			if (!flagName.includes('-')) {
				// Check for camelCase pattern (lowercase followed by uppercase)
				if (/[a-z][A-Z]/.test(flagName)) {
					const kebabVersion = toKebabCase(flagName);
					if (kebabVersion !== flagName) {
						camelCaseFlags.push({
							original: flagName,
							kebabCase: kebabVersion
						});
					}
				}
			}
		}
	}
	return camelCaseFlags;
}

// Then update the utils.js mock to include these functions
jest.mock('../../scripts/modules/utils.js', () => ({
	CONFIG: {
		projectVersion: '1.5.0'
	},
	log: mockLog,
	toKebabCase: toKebabCase,
	detectCamelCaseFlags: detectCamelCaseFlags
	CONFIG: {
		projectVersion: '1.5.0'
	},
	log: mockLog,
	toKebabCase: toKebabCase,
	detectCamelCaseFlags: detectCamelCaseFlags
}));
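To make the helpers' behavior concrete, here is what they produce for a typical argv. The expected outputs follow from the implementations above; the argv itself is hypothetical:

```javascript
toKebabCase('numTasks'); // => 'num-tasks'

detectCamelCaseFlags(['node', 'task-master', '--numTasks=5', '--output=tasks.json']);
// => [{ original: 'numTasks', kebabCase: 'num-tasks' }]
// '--output' is skipped: it contains no camelCase boundary
```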
|
||||
// Import all modules after mocking
|
||||
@@ -100,190 +106,592 @@ import { setupCLI } from '../../scripts/modules/commands.js';
|
||||
|
||||
// We'll use a simplified, direct test approach instead of Commander mocking
|
||||
describe('Commands Module', () => {
|
||||
// Set up spies on the mocked modules
|
||||
const mockExistsSync = jest.spyOn(fs, 'existsSync');
|
||||
const mockReadFileSync = jest.spyOn(fs, 'readFileSync');
|
||||
const mockJoin = jest.spyOn(path, 'join');
|
||||
const mockConsoleLog = jest.spyOn(console, 'log').mockImplementation(() => {});
|
||||
const mockConsoleError = jest.spyOn(console, 'error').mockImplementation(() => {});
|
||||
const mockExit = jest.spyOn(process, 'exit').mockImplementation(() => {});
|
||||
// Set up spies on the mocked modules
|
||||
const mockExistsSync = jest.spyOn(fs, 'existsSync');
|
||||
const mockReadFileSync = jest.spyOn(fs, 'readFileSync');
|
||||
const mockJoin = jest.spyOn(path, 'join');
|
||||
const mockConsoleLog = jest
|
||||
.spyOn(console, 'log')
|
||||
.mockImplementation(() => {});
|
||||
const mockConsoleError = jest
|
||||
.spyOn(console, 'error')
|
||||
.mockImplementation(() => {});
|
||||
const mockExit = jest.spyOn(process, 'exit').mockImplementation(() => {});
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
mockExistsSync.mockReturnValue(true);
|
||||
});
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
mockExistsSync.mockReturnValue(true);
|
||||
});
|
||||
|
||||
afterAll(() => {
|
||||
jest.restoreAllMocks();
|
||||
});
|
||||
afterAll(() => {
|
||||
jest.restoreAllMocks();
|
||||
});
|
||||
|
||||
describe('setupCLI function', () => {
|
||||
test('should return Commander program instance', () => {
|
||||
const program = setupCLI();
|
||||
expect(program).toBeDefined();
|
||||
expect(program.name()).toBe('dev');
|
||||
});
|
||||
describe('setupCLI function', () => {
|
||||
test('should return Commander program instance', () => {
|
||||
const program = setupCLI();
|
||||
expect(program).toBeDefined();
|
||||
expect(program.name()).toBe('dev');
|
||||
});
|
||||
|
||||
test('should read version from package.json when available', () => {
|
||||
mockExistsSync.mockReturnValue(true);
|
||||
mockReadFileSync.mockReturnValue('{"version": "1.0.0"}');
|
||||
mockJoin.mockReturnValue('package.json');
|
||||
|
||||
const program = setupCLI();
|
||||
const version = program._version();
|
||||
expect(mockReadFileSync).toHaveBeenCalledWith('package.json', 'utf8');
|
||||
expect(version).toBe('1.0.0');
|
||||
});
|
||||
test('should read version from package.json when available', () => {
|
||||
mockExistsSync.mockReturnValue(true);
|
||||
mockReadFileSync.mockReturnValue('{"version": "1.0.0"}');
|
||||
mockJoin.mockReturnValue('package.json');
|
||||
|
||||
test('should use default version when package.json is not available', () => {
|
||||
mockExistsSync.mockReturnValue(false);
|
||||
|
||||
const program = setupCLI();
|
||||
const version = program._version();
|
||||
expect(mockReadFileSync).not.toHaveBeenCalled();
|
||||
expect(version).toBe('1.5.0');
|
||||
});
|
||||
const program = setupCLI();
|
||||
const version = program._version();
|
||||
expect(mockReadFileSync).toHaveBeenCalledWith('package.json', 'utf8');
|
||||
expect(version).toBe('1.0.0');
|
||||
});
|
||||
|
||||
test('should use default version when package.json reading throws an error', () => {
|
||||
mockExistsSync.mockReturnValue(true);
|
||||
mockReadFileSync.mockImplementation(() => {
|
||||
throw new Error('Invalid JSON');
|
||||
});
|
||||
|
||||
const program = setupCLI();
|
||||
const version = program._version();
|
||||
expect(mockReadFileSync).toHaveBeenCalled();
|
||||
expect(version).toBe('1.5.0');
|
||||
});
|
||||
});
|
||||
test('should use default version when package.json is not available', () => {
|
||||
mockExistsSync.mockReturnValue(false);
|
||||
|
||||
describe('Kebab Case Validation', () => {
|
||||
test('should detect camelCase flags correctly', () => {
|
||||
const args = ['node', 'task-master', '--camelCase', '--kebab-case'];
|
||||
const camelCaseFlags = args.filter(arg =>
|
||||
arg.startsWith('--') &&
|
||||
/[A-Z]/.test(arg) &&
|
||||
!arg.includes('-[A-Z]')
|
||||
);
|
||||
expect(camelCaseFlags).toContain('--camelCase');
|
||||
expect(camelCaseFlags).not.toContain('--kebab-case');
|
||||
});
|
||||
const program = setupCLI();
|
||||
const version = program._version();
|
||||
expect(mockReadFileSync).not.toHaveBeenCalled();
|
||||
expect(version).toBe('1.5.0');
|
||||
});
|
||||
|
||||
test('should accept kebab-case flags correctly', () => {
|
||||
const args = ['node', 'task-master', '--kebab-case'];
|
||||
const camelCaseFlags = args.filter(arg =>
|
||||
arg.startsWith('--') &&
|
||||
/[A-Z]/.test(arg) &&
|
||||
!arg.includes('-[A-Z]')
|
||||
);
|
||||
expect(camelCaseFlags).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
test('should use default version when package.json reading throws an error', () => {
|
||||
mockExistsSync.mockReturnValue(true);
|
||||
mockReadFileSync.mockImplementation(() => {
|
||||
throw new Error('Invalid JSON');
|
||||
});
|
||||
|
||||
describe('parse-prd command', () => {
|
||||
// Since mocking Commander is complex, we'll test the action handler directly
|
||||
// Recreate the action handler logic based on commands.js
|
||||
async function parsePrdAction(file, options) {
|
||||
// Use input option if file argument not provided
|
||||
const inputFile = file || options.input;
|
||||
const defaultPrdPath = 'scripts/prd.txt';
|
||||
|
||||
// If no input file specified, check for default PRD location
|
||||
if (!inputFile) {
|
||||
if (fs.existsSync(defaultPrdPath)) {
|
||||
console.log(chalk.blue(`Using default PRD file: ${defaultPrdPath}`));
|
||||
const numTasks = parseInt(options.numTasks, 10);
|
||||
const outputPath = options.output;
|
||||
|
||||
console.log(chalk.blue(`Generating ${numTasks} tasks...`));
|
||||
await mockParsePRD(defaultPrdPath, outputPath, numTasks);
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(chalk.yellow('No PRD file specified and default PRD file not found at scripts/prd.txt.'));
|
||||
return;
|
||||
}
|
||||
|
||||
const numTasks = parseInt(options.numTasks, 10);
|
||||
const outputPath = options.output;
|
||||
|
||||
console.log(chalk.blue(`Parsing PRD file: ${inputFile}`));
|
||||
console.log(chalk.blue(`Generating ${numTasks} tasks...`));
|
||||
|
||||
await mockParsePRD(inputFile, outputPath, numTasks);
|
||||
}
|
||||
const program = setupCLI();
|
||||
const version = program._version();
|
||||
expect(mockReadFileSync).toHaveBeenCalled();
|
||||
expect(version).toBe('1.5.0');
|
||||
});
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
// Reset the parsePRD mock
|
||||
mockParsePRD.mockClear();
|
||||
});
|
||||
describe('Kebab Case Validation', () => {
|
||||
test('should detect camelCase flags correctly', () => {
|
||||
const args = ['node', 'task-master', '--camelCase', '--kebab-case'];
|
||||
const camelCaseFlags = args.filter(
|
||||
(arg) =>
|
||||
arg.startsWith('--') && /[A-Z]/.test(arg) && !arg.includes('-[A-Z]')
|
||||
);
|
||||
expect(camelCaseFlags).toContain('--camelCase');
|
||||
expect(camelCaseFlags).not.toContain('--kebab-case');
|
||||
});
|
||||
|
||||
test('should use default PRD path when no arguments provided', async () => {
|
||||
// Arrange
|
||||
mockExistsSync.mockReturnValue(true);
|
||||
|
||||
// Act - call the handler directly with the right params
|
||||
await parsePrdAction(undefined, { numTasks: '10', output: 'tasks/tasks.json' });
|
||||
|
||||
// Assert
|
||||
expect(mockExistsSync).toHaveBeenCalledWith('scripts/prd.txt');
|
||||
expect(mockConsoleLog).toHaveBeenCalledWith(expect.stringContaining('Using default PRD file'));
|
||||
expect(mockParsePRD).toHaveBeenCalledWith(
|
||||
'scripts/prd.txt',
|
||||
'tasks/tasks.json',
|
||||
10 // Default value from command definition
|
||||
);
|
||||
});
|
||||
test('should accept kebab-case flags correctly', () => {
|
||||
const args = ['node', 'task-master', '--kebab-case'];
|
||||
const camelCaseFlags = args.filter(
|
||||
(arg) =>
|
||||
arg.startsWith('--') && /[A-Z]/.test(arg) && !arg.includes('-[A-Z]')
|
||||
);
|
||||
expect(camelCaseFlags).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
test('should display help when no arguments and no default PRD exists', async () => {
|
||||
// Arrange
|
||||
mockExistsSync.mockReturnValue(false);
|
||||
|
||||
// Act - call the handler directly with the right params
|
||||
await parsePrdAction(undefined, { numTasks: '10', output: 'tasks/tasks.json' });
|
||||
|
||||
// Assert
|
||||
expect(mockConsoleLog).toHaveBeenCalledWith(expect.stringContaining('No PRD file specified'));
|
||||
expect(mockParsePRD).not.toHaveBeenCalled();
|
||||
});
|
||||
describe('parse-prd command', () => {
|
||||
// Since mocking Commander is complex, we'll test the action handler directly
|
||||
// Recreate the action handler logic based on commands.js
|
||||
async function parsePrdAction(file, options) {
|
||||
// Use input option if file argument not provided
|
||||
const inputFile = file || options.input;
|
||||
const defaultPrdPath = 'scripts/prd.txt';
|
||||
|
||||
test('should use explicitly provided file path', async () => {
|
||||
// Arrange
|
||||
const testFile = 'test/prd.txt';
|
||||
|
||||
// Act - call the handler directly with the right params
|
||||
await parsePrdAction(testFile, { numTasks: '10', output: 'tasks/tasks.json' });
|
||||
|
||||
// Assert
|
||||
expect(mockConsoleLog).toHaveBeenCalledWith(expect.stringContaining(`Parsing PRD file: ${testFile}`));
|
||||
expect(mockParsePRD).toHaveBeenCalledWith(testFile, 'tasks/tasks.json', 10);
|
||||
expect(mockExistsSync).not.toHaveBeenCalledWith('scripts/prd.txt');
|
||||
});
|
||||
// If no input file specified, check for default PRD location
|
||||
if (!inputFile) {
|
||||
if (fs.existsSync(defaultPrdPath)) {
|
||||
console.log(chalk.blue(`Using default PRD file: ${defaultPrdPath}`));
|
||||
const numTasks = parseInt(options.numTasks, 10);
|
||||
const outputPath = options.output;
|
||||
|
||||
test('should use file path from input option when provided', async () => {
|
||||
// Arrange
|
||||
const testFile = 'test/prd.txt';
|
||||
|
||||
// Act - call the handler directly with the right params
|
||||
await parsePrdAction(undefined, { input: testFile, numTasks: '10', output: 'tasks/tasks.json' });
|
||||
|
||||
// Assert
|
||||
expect(mockConsoleLog).toHaveBeenCalledWith(expect.stringContaining(`Parsing PRD file: ${testFile}`));
|
||||
expect(mockParsePRD).toHaveBeenCalledWith(testFile, 'tasks/tasks.json', 10);
|
||||
expect(mockExistsSync).not.toHaveBeenCalledWith('scripts/prd.txt');
|
||||
});
|
||||
console.log(chalk.blue(`Generating ${numTasks} tasks...`));
|
||||
await mockParsePRD(defaultPrdPath, outputPath, numTasks);
|
||||
return;
|
||||
}
|
||||
|
||||
test('should respect numTasks and output options', async () => {
|
||||
// Arrange
|
||||
const testFile = 'test/prd.txt';
|
||||
const outputFile = 'custom/output.json';
|
||||
const numTasks = 15;
|
||||
|
||||
// Act - call the handler directly with the right params
|
||||
await parsePrdAction(testFile, { numTasks: numTasks.toString(), output: outputFile });
|
||||
|
||||
// Assert
|
||||
expect(mockParsePRD).toHaveBeenCalledWith(testFile, outputFile, numTasks);
|
||||
});
|
||||
});
|
||||
});
|
||||
console.log(
|
||||
chalk.yellow(
|
||||
'No PRD file specified and default PRD file not found at scripts/prd.txt.'
|
||||
)
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const numTasks = parseInt(options.numTasks, 10);
|
||||
const outputPath = options.output;
|
||||
|
||||
console.log(chalk.blue(`Parsing PRD file: ${inputFile}`));
|
||||
console.log(chalk.blue(`Generating ${numTasks} tasks...`));
|
||||
|
||||
await mockParsePRD(inputFile, outputPath, numTasks);
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
// Reset the parsePRD mock
|
||||
mockParsePRD.mockClear();
|
||||
});
|
||||
|
||||
test('should use default PRD path when no arguments provided', async () => {
|
||||
// Arrange
|
||||
mockExistsSync.mockReturnValue(true);
|
||||
|
||||
// Act - call the handler directly with the right params
|
||||
await parsePrdAction(undefined, {
|
||||
numTasks: '10',
|
||||
output: 'tasks/tasks.json'
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(mockExistsSync).toHaveBeenCalledWith('scripts/prd.txt');
|
||||
expect(mockConsoleLog).toHaveBeenCalledWith(
|
||||
expect.stringContaining('Using default PRD file')
|
||||
);
|
||||
expect(mockParsePRD).toHaveBeenCalledWith(
|
||||
'scripts/prd.txt',
|
||||
'tasks/tasks.json',
|
||||
10 // Default value from command definition
|
||||
);
|
||||
});
|
||||
|
||||
test('should display help when no arguments and no default PRD exists', async () => {
|
||||
// Arrange
|
||||
mockExistsSync.mockReturnValue(false);
|
||||
|
||||
// Act - call the handler directly with the right params
|
||||
await parsePrdAction(undefined, {
|
||||
numTasks: '10',
|
||||
output: 'tasks/tasks.json'
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(mockConsoleLog).toHaveBeenCalledWith(
|
||||
expect.stringContaining('No PRD file specified')
|
||||
);
|
||||
expect(mockParsePRD).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should use explicitly provided file path', async () => {
|
||||
// Arrange
|
||||
const testFile = 'test/prd.txt';
|
||||
|
||||
// Act - call the handler directly with the right params
|
||||
await parsePrdAction(testFile, {
|
||||
numTasks: '10',
|
||||
output: 'tasks/tasks.json'
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(mockConsoleLog).toHaveBeenCalledWith(
|
||||
expect.stringContaining(`Parsing PRD file: ${testFile}`)
|
||||
);
|
||||
expect(mockParsePRD).toHaveBeenCalledWith(
|
||||
testFile,
|
||||
'tasks/tasks.json',
|
||||
10
|
||||
);
|
||||
expect(mockExistsSync).not.toHaveBeenCalledWith('scripts/prd.txt');
|
||||
});
|
||||
|
||||
test('should use file path from input option when provided', async () => {
|
||||
// Arrange
|
||||
const testFile = 'test/prd.txt';
|
||||
|
||||
// Act - call the handler directly with the right params
|
||||
await parsePrdAction(undefined, {
|
||||
input: testFile,
|
||||
numTasks: '10',
|
||||
output: 'tasks/tasks.json'
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(mockConsoleLog).toHaveBeenCalledWith(
|
||||
expect.stringContaining(`Parsing PRD file: ${testFile}`)
|
||||
);
|
||||
expect(mockParsePRD).toHaveBeenCalledWith(
|
||||
testFile,
|
||||
'tasks/tasks.json',
|
||||
10
|
||||
);
|
||||
expect(mockExistsSync).not.toHaveBeenCalledWith('scripts/prd.txt');
|
||||
});
|
||||
|
||||
test('should respect numTasks and output options', async () => {
|
||||
// Arrange
|
||||
const testFile = 'test/prd.txt';
|
||||
const outputFile = 'custom/output.json';
|
||||
const numTasks = 15;
|
||||
|
||||
// Act - call the handler directly with the right params
|
||||
await parsePrdAction(testFile, {
|
||||
numTasks: numTasks.toString(),
|
||||
output: outputFile
|
||||
});
|
||||
|
||||
// Assert
|
||||
expect(mockParsePRD).toHaveBeenCalledWith(testFile, outputFile, numTasks);
|
||||
});
|
||||
});
|
||||
|
||||
describe('updateTask command', () => {
|
||||
// Since mocking Commander is complex, we'll test the action handler directly
|
||||
// Recreate the action handler logic based on commands.js
|
||||
async function updateTaskAction(options) {
|
||||
try {
|
||||
const tasksPath = options.file;
|
||||
|
||||
// Validate required parameters
|
||||
if (!options.id) {
|
||||
console.error(chalk.red('Error: --id parameter is required'));
|
||||
console.log(
|
||||
chalk.yellow(
|
||||
'Usage example: task-master update-task --id=23 --prompt="Update with new information"'
|
||||
)
|
||||
);
|
||||
process.exit(1);
|
||||
return; // Add early return to prevent calling updateTaskById
|
||||
}
|
||||
|
||||
// Parse the task ID and validate it's a number
|
||||
const taskId = parseInt(options.id, 10);
|
||||
if (isNaN(taskId) || taskId <= 0) {
|
||||
console.error(
|
||||
chalk.red(
|
||||
`Error: Invalid task ID: ${options.id}. Task ID must be a positive integer.`
|
||||
)
|
||||
);
|
||||
console.log(
|
||||
chalk.yellow(
|
||||
'Usage example: task-master update-task --id=23 --prompt="Update with new information"'
|
||||
)
|
||||
);
|
||||
process.exit(1);
|
||||
return; // Add early return to prevent calling updateTaskById
|
||||
}
|
||||
|
||||
if (!options.prompt) {
|
||||
console.error(
|
||||
chalk.red(
|
||||
'Error: --prompt parameter is required. Please provide information about the changes.'
|
||||
)
|
||||
);
|
||||
console.log(
|
||||
chalk.yellow(
|
||||
'Usage example: task-master update-task --id=23 --prompt="Update with new information"'
|
||||
)
|
||||
);
|
||||
process.exit(1);
|
||||
return; // Add early return to prevent calling updateTaskById
|
||||
}
|
||||
|
||||
const prompt = options.prompt;
|
||||
const useResearch = options.research || false;
|
||||
|
||||
// Validate tasks file exists
|
||||
if (!fs.existsSync(tasksPath)) {
|
||||
console.error(
|
||||
chalk.red(`Error: Tasks file not found at path: ${tasksPath}`)
|
||||
);
|
||||
if (tasksPath === 'tasks/tasks.json') {
|
||||
console.log(
|
||||
chalk.yellow(
|
||||
'Hint: Run task-master init or task-master parse-prd to create tasks.json first'
|
||||
)
|
||||
);
|
||||
} else {
|
||||
console.log(
|
||||
chalk.yellow(
|
||||
`Hint: Check if the file path is correct: ${tasksPath}`
|
||||
)
|
||||
);
|
||||
}
|
||||
process.exit(1);
|
||||
return; // Add early return to prevent calling updateTaskById
|
||||
}
|
||||
|
||||
console.log(
|
||||
chalk.blue(`Updating task ${taskId} with prompt: "${prompt}"`)
|
||||
);
|
||||
console.log(chalk.blue(`Tasks file: ${tasksPath}`));
|
||||
|
||||
if (useResearch) {
|
||||
// Verify Perplexity API key exists if using research
|
||||
if (!process.env.PERPLEXITY_API_KEY) {
|
||||
console.log(
|
||||
chalk.yellow(
|
||||
'Warning: PERPLEXITY_API_KEY environment variable is missing. Research-backed updates will not be available.'
|
||||
)
|
||||
);
|
||||
console.log(
|
||||
chalk.yellow('Falling back to Claude AI for task update.')
|
||||
);
|
||||
} else {
|
||||
console.log(
|
||||
chalk.blue('Using Perplexity AI for research-backed task update')
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
const result = await mockUpdateTaskById(
|
||||
tasksPath,
|
||||
taskId,
|
||||
prompt,
|
||||
useResearch
|
||||
);
|
||||
|
||||
// If the task wasn't updated (e.g., if it was already marked as done)
|
||||
if (!result) {
|
||||
console.log(
|
||||
chalk.yellow(
|
||||
'\nTask update was not completed. Review the messages above for details.'
|
||||
)
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(chalk.red(`Error: ${error.message}`));
|
||||
|
||||
// Provide more helpful error messages for common issues
|
||||
if (
|
||||
error.message.includes('task') &&
|
||||
error.message.includes('not found')
|
||||
) {
|
||||
console.log(chalk.yellow('\nTo fix this issue:'));
|
||||
console.log(
|
||||
' 1. Run task-master list to see all available task IDs'
|
||||
);
|
||||
console.log(' 2. Use a valid task ID with the --id parameter');
|
||||
} else if (error.message.includes('API key')) {
|
||||
console.log(
|
||||
chalk.yellow(
|
||||
'\nThis error is related to API keys. Check your environment variables.'
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
if (true) {
|
||||
// CONFIG.debug
|
||||
console.error(error);
|
||||
}
|
||||
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||

		beforeEach(() => {
			// Reset all mocks
			jest.clearAllMocks();

			// Set up spy for existsSync (already mocked in the outer scope)
			mockExistsSync.mockReturnValue(true);
		});

		test('should validate required parameters - missing ID', async () => {
			// Set up the command options without ID
			const options = {
				file: 'test-tasks.json',
				prompt: 'Update the task'
			};

			// Call the action directly
			await updateTaskAction(options);

			// Verify validation error
			expect(mockConsoleError).toHaveBeenCalledWith(
				expect.stringContaining('--id parameter is required')
			);
			expect(mockExit).toHaveBeenCalledWith(1);
			expect(mockUpdateTaskById).not.toHaveBeenCalled();
		});

		test('should validate required parameters - invalid ID', async () => {
			// Set up the command options with invalid ID
			const options = {
				file: 'test-tasks.json',
				id: 'not-a-number',
				prompt: 'Update the task'
			};

			// Call the action directly
			await updateTaskAction(options);

			// Verify validation error
			expect(mockConsoleError).toHaveBeenCalledWith(
				expect.stringContaining('Invalid task ID')
			);
			expect(mockExit).toHaveBeenCalledWith(1);
			expect(mockUpdateTaskById).not.toHaveBeenCalled();
		});

		test('should validate required parameters - missing prompt', async () => {
			// Set up the command options without prompt
			const options = {
				file: 'test-tasks.json',
				id: '2'
			};

			// Call the action directly
			await updateTaskAction(options);

			// Verify validation error
			expect(mockConsoleError).toHaveBeenCalledWith(
				expect.stringContaining('--prompt parameter is required')
			);
			expect(mockExit).toHaveBeenCalledWith(1);
			expect(mockUpdateTaskById).not.toHaveBeenCalled();
		});

		test('should validate tasks file exists', async () => {
			// Mock file not existing
			mockExistsSync.mockReturnValue(false);

			// Set up the command options
			const options = {
				file: 'missing-tasks.json',
				id: '2',
				prompt: 'Update the task'
			};

			// Call the action directly
			await updateTaskAction(options);

			// Verify validation error
			expect(mockConsoleError).toHaveBeenCalledWith(
				expect.stringContaining('Tasks file not found')
			);
			expect(mockExit).toHaveBeenCalledWith(1);
			expect(mockUpdateTaskById).not.toHaveBeenCalled();
		});

		test('should call updateTaskById with correct parameters', async () => {
			// Set up the command options
			const options = {
				file: 'test-tasks.json',
				id: '2',
				prompt: 'Update the task',
				research: true
			};

			// Mock perplexity API key
			process.env.PERPLEXITY_API_KEY = 'dummy-key';

			// Call the action directly
			await updateTaskAction(options);

			// Verify updateTaskById was called with correct parameters
			expect(mockUpdateTaskById).toHaveBeenCalledWith(
				'test-tasks.json',
				2,
				'Update the task',
				true
			);

			// Verify console output
			expect(mockConsoleLog).toHaveBeenCalledWith(
				expect.stringContaining('Updating task 2')
			);
			expect(mockConsoleLog).toHaveBeenCalledWith(
				expect.stringContaining('Using Perplexity AI')
			);

			// Clean up
			delete process.env.PERPLEXITY_API_KEY;
		});

		test('should handle null result from updateTaskById', async () => {
			// Mock updateTaskById returning null (e.g., task already completed)
			mockUpdateTaskById.mockResolvedValueOnce(null);

			// Set up the command options
			const options = {
				file: 'test-tasks.json',
				id: '2',
				prompt: 'Update the task'
			};

			// Call the action directly
			await updateTaskAction(options);

			// Verify updateTaskById was called
			expect(mockUpdateTaskById).toHaveBeenCalled();

			// Verify console output for null result
			expect(mockConsoleLog).toHaveBeenCalledWith(
				expect.stringContaining('Task update was not completed')
			);
		});

		test('should handle errors from updateTaskById', async () => {
			// Mock updateTaskById throwing an error
			mockUpdateTaskById.mockRejectedValueOnce(new Error('Task update failed'));

			// Set up the command options
			const options = {
				file: 'test-tasks.json',
				id: '2',
				prompt: 'Update the task'
			};

			// Call the action directly
			await updateTaskAction(options);

			// Verify error handling
			expect(mockConsoleError).toHaveBeenCalledWith(
				expect.stringContaining('Error: Task update failed')
			);
			expect(mockExit).toHaveBeenCalledWith(1);
		});
	});
});

// Test the version comparison utility
describe('Version comparison', () => {
	// Use a dynamic import for the commands module
	let compareVersions;

	beforeAll(async () => {
		// Import the function we want to test dynamically
		const commandsModule = await import('../../scripts/modules/commands.js');
		compareVersions = commandsModule.compareVersions;
	});

	test('compareVersions correctly compares semantic versions', () => {
		expect(compareVersions('1.0.0', '1.0.0')).toBe(0);
		expect(compareVersions('1.0.0', '1.0.1')).toBe(-1);
		expect(compareVersions('1.0.1', '1.0.0')).toBe(1);
		expect(compareVersions('1.0.0', '1.1.0')).toBe(-1);
		expect(compareVersions('1.1.0', '1.0.0')).toBe(1);
		expect(compareVersions('1.0.0', '2.0.0')).toBe(-1);
		expect(compareVersions('2.0.0', '1.0.0')).toBe(1);
		expect(compareVersions('1.0', '1.0.0')).toBe(0);
		expect(compareVersions('1.0.0.0', '1.0.0')).toBe(0);
		expect(compareVersions('1.0.0', '1.0.0.1')).toBe(-1);
	});
});
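
// For reference, a minimal sketch of a version comparator that satisfies the
// expectations above, treating missing segments as 0 so '1.0' compares equal
// to '1.0.0'. Illustrative only - the real compareVersions in
// scripts/modules/commands.js may be implemented differently.
function compareVersionsSketch(v1, v2) {
	const p1 = v1.split('.').map(Number);
	const p2 = v2.split('.').map(Number);
	const len = Math.max(p1.length, p2.length);
	for (let i = 0; i < len; i++) {
		const a = p1[i] || 0; // missing segment counts as 0
		const b = p2[i] || 0;
		if (a < b) return -1;
		if (a > b) return 1;
	}
	return 0;
}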

// Test the update check functionality
describe('Update check', () => {
	let displayUpgradeNotification;
	let consoleLogSpy;

	beforeAll(async () => {
		// Import the function we want to test dynamically
		const commandsModule = await import('../../scripts/modules/commands.js');
		displayUpgradeNotification = commandsModule.displayUpgradeNotification;
	});

	beforeEach(() => {
		// Spy on console.log
		consoleLogSpy = jest.spyOn(console, 'log').mockImplementation(() => {});
	});

	afterEach(() => {
		consoleLogSpy.mockRestore();
	});

	test('displays upgrade notification when newer version is available', () => {
		// Test displayUpgradeNotification function
		displayUpgradeNotification('1.0.0', '1.1.0');
		expect(consoleLogSpy).toHaveBeenCalled();
		expect(consoleLogSpy.mock.calls[0][0]).toContain('Update Available!');
		expect(consoleLogSpy.mock.calls[0][0]).toContain('1.0.0');
		expect(consoleLogSpy.mock.calls[0][0]).toContain('1.1.0');
	});
});
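
// A minimal sketch of what the test above pins down: the first console.log
// call must mention 'Update Available!' plus both version strings. The real
// displayUpgradeNotification in scripts/modules/commands.js draws a styled
// box; the boxen options used here are assumptions for illustration.
import boxen from 'boxen';

function displayUpgradeNotificationSketch(currentVersion, latestVersion) {
	console.log(
		boxen(`Update Available! ${currentVersion} → ${latestVersion}`, {
			padding: 1,
			borderColor: 'yellow'
		})
	);
}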
@@ -5,142 +5,396 @@ import os from 'os';

// Mock external modules
jest.mock('child_process', () => ({
	execSync: jest.fn()
}));

jest.mock('readline', () => ({
	createInterface: jest.fn(() => ({
		question: jest.fn(),
		close: jest.fn()
	}))
}));

// Mock figlet for banner display
jest.mock('figlet', () => ({
	default: {
		textSync: jest.fn(() => 'Task Master')
	}
}));

// Mock console methods
jest.mock('console', () => ({
	log: jest.fn(),
	info: jest.fn(),
	warn: jest.fn(),
	error: jest.fn(),
	clear: jest.fn()
}));

describe('Windsurf Rules File Handling', () => {
	let tempDir;

	beforeEach(() => {
		jest.clearAllMocks();

		// Create a temporary directory for testing
		tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'task-master-test-'));

		// Spy on fs methods
		jest.spyOn(fs, 'writeFileSync').mockImplementation(() => {});
		jest.spyOn(fs, 'readFileSync').mockImplementation((filePath) => {
			if (filePath.toString().includes('.windsurfrules')) {
				return 'Existing windsurf rules content';
			}
			return '{}';
		});
		jest.spyOn(fs, 'existsSync').mockImplementation((filePath) => {
			// Mock specific file existence checks
			if (filePath.toString().includes('package.json')) {
				return true;
			}
			return false;
		});
		jest.spyOn(fs, 'mkdirSync').mockImplementation(() => {});
		jest.spyOn(fs, 'copyFileSync').mockImplementation(() => {});
	});

	afterEach(() => {
		// Clean up the temporary directory
		try {
			fs.rmSync(tempDir, { recursive: true, force: true });
		} catch (err) {
			console.error(`Error cleaning up: ${err.message}`);
		}
	});

	// Test function that simulates the behavior of .windsurfrules handling
	function mockCopyTemplateFile(templateName, targetPath) {
		if (templateName === 'windsurfrules') {
			const filename = path.basename(targetPath);

			if (filename === '.windsurfrules') {
				if (fs.existsSync(targetPath)) {
					// Should append content when file exists
					const existingContent = fs.readFileSync(targetPath, 'utf8');
					const updatedContent =
						existingContent.trim() +
						'\n\n# Added by Claude Task Master - Development Workflow Rules\n\n' +
						'New content';
					fs.writeFileSync(targetPath, updatedContent);
					return;
				}
			}

			// If file doesn't exist, create it normally
			fs.writeFileSync(targetPath, 'New content');
		}
	}

	test('creates .windsurfrules when it does not exist', () => {
		// Arrange
		const targetPath = path.join(tempDir, '.windsurfrules');

		// Act
		mockCopyTemplateFile('windsurfrules', targetPath);

		// Assert
		expect(fs.writeFileSync).toHaveBeenCalledWith(targetPath, 'New content');
	});

	test('appends content to existing .windsurfrules', () => {
		// Arrange
		const targetPath = path.join(tempDir, '.windsurfrules');
		const existingContent = 'Existing windsurf rules content';

		// Override the existsSync mock just for this test
		fs.existsSync.mockReturnValueOnce(true); // Target file exists
		fs.readFileSync.mockReturnValueOnce(existingContent);

		// Act
		mockCopyTemplateFile('windsurfrules', targetPath);

		// Assert
		expect(fs.writeFileSync).toHaveBeenCalledWith(
			targetPath,
			expect.stringContaining(existingContent)
		);
		expect(fs.writeFileSync).toHaveBeenCalledWith(
			targetPath,
			expect.stringContaining('Added by Claude Task Master')
		);
	});

	test('includes .windsurfrules in project structure creation', () => {
		// This test verifies the expected behavior by using a mock implementation
		// that represents how createProjectStructure should work

		// Mock implementation of createProjectStructure
		function mockCreateProjectStructure(projectName) {
			// Copy template files including .windsurfrules
			mockCopyTemplateFile(
				'windsurfrules',
				path.join(tempDir, '.windsurfrules')
			);
		}

		// Act - call our mock implementation
		mockCreateProjectStructure('test-project');

		// Assert - verify that .windsurfrules was created
		expect(fs.writeFileSync).toHaveBeenCalledWith(
			path.join(tempDir, '.windsurfrules'),
			expect.any(String)
		);
	});
});
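
// For illustration, when the append path in mockCopyTemplateFile runs against
// an existing file, the content written back is the old rules followed by the
// marker and the new rules:
//
//   Existing windsurf rules content
//
//   # Added by Claude Task Master - Development Workflow Rules
//
//   New content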

// New test suite for MCP Configuration Handling
describe('MCP Configuration Handling', () => {
	let tempDir;

	beforeEach(() => {
		jest.clearAllMocks();

		// Create a temporary directory for testing
		tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'task-master-test-'));

		// Spy on fs methods
		jest.spyOn(fs, 'writeFileSync').mockImplementation(() => {});
		jest.spyOn(fs, 'readFileSync').mockImplementation((filePath) => {
			if (filePath.toString().includes('mcp.json')) {
				return JSON.stringify({
					mcpServers: {
						'existing-server': {
							command: 'node',
							args: ['server.js']
						}
					}
				});
			}
			return '{}';
		});
		jest.spyOn(fs, 'existsSync').mockImplementation((filePath) => {
			// Return true for specific paths to test different scenarios
			if (filePath.toString().includes('package.json')) {
				return true;
			}
			// Default to false for other paths
			return false;
		});
		jest.spyOn(fs, 'mkdirSync').mockImplementation(() => {});
		jest.spyOn(fs, 'copyFileSync').mockImplementation(() => {});
	});

	afterEach(() => {
		// Clean up the temporary directory
		try {
			fs.rmSync(tempDir, { recursive: true, force: true });
		} catch (err) {
			console.error(`Error cleaning up: ${err.message}`);
		}
	});

	// Test function that simulates the behavior of setupMCPConfiguration
	function mockSetupMCPConfiguration(targetDir, projectName) {
		const mcpDirPath = path.join(targetDir, '.cursor');
		const mcpJsonPath = path.join(mcpDirPath, 'mcp.json');

		// Create .cursor directory if it doesn't exist
		if (!fs.existsSync(mcpDirPath)) {
			fs.mkdirSync(mcpDirPath, { recursive: true });
		}

		// New MCP config to be added - references the installed package
		const newMCPServer = {
			'task-master-ai': {
				command: 'npx',
				args: ['task-master-ai', 'mcp-server']
			}
		};

		// Check if mcp.json already exists
		if (fs.existsSync(mcpJsonPath)) {
			try {
				// Read existing config
				const mcpConfig = JSON.parse(fs.readFileSync(mcpJsonPath, 'utf8'));

				// Initialize mcpServers if it doesn't exist
				if (!mcpConfig.mcpServers) {
					mcpConfig.mcpServers = {};
				}

				// Add the taskmaster-ai server if it doesn't exist
				if (!mcpConfig.mcpServers['task-master-ai']) {
					mcpConfig.mcpServers['task-master-ai'] =
						newMCPServer['task-master-ai'];
				}

				// Write the updated configuration
				fs.writeFileSync(mcpJsonPath, JSON.stringify(mcpConfig, null, 4));
			} catch (error) {
				// Create new configuration on error
				const newMCPConfig = {
					mcpServers: newMCPServer
				};

				fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
			}
		} else {
			// If mcp.json doesn't exist, create it
			const newMCPConfig = {
				mcpServers: newMCPServer
			};

			fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
		}
	}

	test('creates mcp.json when it does not exist', () => {
		// Arrange
		const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');

		// Act
		mockSetupMCPConfiguration(tempDir, 'test-project');

		// Assert
		expect(fs.writeFileSync).toHaveBeenCalledWith(
			mcpJsonPath,
			expect.stringContaining('task-master-ai')
		);

		// Should create a proper structure with mcpServers key
		expect(fs.writeFileSync).toHaveBeenCalledWith(
			mcpJsonPath,
			expect.stringContaining('mcpServers')
		);

		// Should reference npx command
		expect(fs.writeFileSync).toHaveBeenCalledWith(
			mcpJsonPath,
			expect.stringContaining('npx')
		);
	});

	test('updates existing mcp.json by adding new server', () => {
		// Arrange
		const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');

		// Override the existsSync mock to simulate mcp.json exists
		fs.existsSync.mockImplementation((filePath) => {
			if (filePath.toString().includes('mcp.json')) {
				return true;
			}
			return false;
		});

		// Act
		mockSetupMCPConfiguration(tempDir, 'test-project');

		// Assert
		// Should preserve existing server
		expect(fs.writeFileSync).toHaveBeenCalledWith(
			mcpJsonPath,
			expect.stringContaining('existing-server')
		);

		// Should add our new server
		expect(fs.writeFileSync).toHaveBeenCalledWith(
			mcpJsonPath,
			expect.stringContaining('task-master-ai')
		);
	});

	test('handles JSON parsing errors by creating new mcp.json', () => {
		// Arrange
		const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');

		// Override existsSync to say mcp.json exists
		fs.existsSync.mockImplementation((filePath) => {
			if (filePath.toString().includes('mcp.json')) {
				return true;
			}
			return false;
		});

		// But make readFileSync return invalid JSON
		fs.readFileSync.mockImplementation((filePath) => {
			if (filePath.toString().includes('mcp.json')) {
				return '{invalid json';
			}
			return '{}';
		});

		// Act
		mockSetupMCPConfiguration(tempDir, 'test-project');

		// Assert
		// Should create a new valid JSON file with our server
		expect(fs.writeFileSync).toHaveBeenCalledWith(
			mcpJsonPath,
			expect.stringContaining('task-master-ai')
		);
	});

	test('does not modify existing server configuration if it already exists', () => {
		// Arrange
		const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');

		// Override existsSync to say mcp.json exists
		fs.existsSync.mockImplementation((filePath) => {
			if (filePath.toString().includes('mcp.json')) {
				return true;
			}
			return false;
		});

		// Return JSON that already has task-master-ai
		fs.readFileSync.mockImplementation((filePath) => {
			if (filePath.toString().includes('mcp.json')) {
				return JSON.stringify({
					mcpServers: {
						'existing-server': {
							command: 'node',
							args: ['server.js']
						},
						'task-master-ai': {
							command: 'custom',
							args: ['custom-args']
						}
					}
				});
			}
			return '{}';
		});

		// Spy to check what's written
		const writeFileSyncSpy = jest.spyOn(fs, 'writeFileSync');

		// Act
		mockSetupMCPConfiguration(tempDir, 'test-project');

		// Assert
		// Verify the written data contains the original taskmaster configuration
		const dataWritten = JSON.parse(writeFileSyncSpy.mock.calls[0][1]);
		expect(dataWritten.mcpServers['task-master-ai'].command).toBe('custom');
		expect(dataWritten.mcpServers['task-master-ai'].args).toContain(
			'custom-args'
		);
	});

	test('creates the .cursor directory if it doesnt exist', () => {
		// Arrange
		const cursorDirPath = path.join(tempDir, '.cursor');

		// Make sure it looks like the directory doesn't exist
		fs.existsSync.mockReturnValue(false);

		// Act
		mockSetupMCPConfiguration(tempDir, 'test-project');

		// Assert
		expect(fs.mkdirSync).toHaveBeenCalledWith(cursorDirPath, {
			recursive: true
		});
	});
});
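
// For illustration, the merge path above (an existing mcp.json plus the new
// entry) produces a combined config like this:
//
// {
//     "mcpServers": {
//         "existing-server": {
//             "command": "node",
//             "args": ["server.js"]
//         },
//         "task-master-ai": {
//             "command": "npx",
//             "args": ["task-master-ai", "mcp-server"]
//         }
//     }
// }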
@@ -7,114 +7,126 @@ import { toKebabCase } from '../../scripts/modules/utils.js';

// Create a test implementation of detectCamelCaseFlags
function testDetectCamelCaseFlags(args) {
	const camelCaseFlags = [];
	for (const arg of args) {
		if (arg.startsWith('--')) {
			const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =

			// Skip single-word flags - they can't be camelCase
			if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
				continue;
			}

			// Check for camelCase pattern (lowercase followed by uppercase)
			if (/[a-z][A-Z]/.test(flagName)) {
				const kebabVersion = toKebabCase(flagName);
				if (kebabVersion !== flagName) {
					camelCaseFlags.push({
						original: flagName,
						kebabCase: kebabVersion
					});
				}
			}
		}
	}
	return camelCaseFlags;
}

describe('Kebab Case Validation', () => {
	describe('toKebabCase', () => {
		test('should convert camelCase to kebab-case', () => {
			expect(toKebabCase('promptText')).toBe('prompt-text');
			expect(toKebabCase('userID')).toBe('user-id');
			expect(toKebabCase('numTasks')).toBe('num-tasks');
		});

		test('should handle already kebab-case strings', () => {
			expect(toKebabCase('already-kebab-case')).toBe('already-kebab-case');
			expect(toKebabCase('kebab-case')).toBe('kebab-case');
		});

		test('should handle single words', () => {
			expect(toKebabCase('single')).toBe('single');
			expect(toKebabCase('file')).toBe('file');
		});
	});

	describe('detectCamelCaseFlags', () => {
		test('should properly detect camelCase flags', () => {
			const args = [
				'node',
				'task-master',
				'add-task',
				'--promptText=test',
				'--userID=123'
			];
			const flags = testDetectCamelCaseFlags(args);

			expect(flags).toHaveLength(2);
			expect(flags).toContainEqual({
				original: 'promptText',
				kebabCase: 'prompt-text'
			});
			expect(flags).toContainEqual({
				original: 'userID',
				kebabCase: 'user-id'
			});
		});

		test('should not flag kebab-case or lowercase flags', () => {
			const args = [
				'node',
				'task-master',
				'add-task',
				'--prompt=test',
				'--user-id=123'
			];
			const flags = testDetectCamelCaseFlags(args);

			expect(flags).toHaveLength(0);
		});

		test('should not flag any single-word flags regardless of case', () => {
			const args = [
				'node',
				'task-master',
				'add-task',
				'--prompt=test', // lowercase
				'--PROMPT=test', // uppercase
				'--Prompt=test', // mixed case
				'--file=test', // lowercase
				'--FILE=test', // uppercase
				'--File=test' // mixed case
			];
			const flags = testDetectCamelCaseFlags(args);

			expect(flags).toHaveLength(0);
		});

		test('should handle mixed case flags correctly', () => {
			const args = [
				'node',
				'task-master',
				'add-task',
				'--prompt=test', // single word, should pass
				'--promptText=test', // camelCase, should flag
				'--prompt-text=test', // kebab-case, should pass
				'--ID=123', // single word, should pass
				'--userId=123', // camelCase, should flag
				'--user-id=123' // kebab-case, should pass
			];

			const flags = testDetectCamelCaseFlags(args);

			expect(flags).toHaveLength(2);
			expect(flags).toContainEqual({
				original: 'promptText',
				kebabCase: 'prompt-text'
			});
			expect(flags).toContainEqual({
				original: 'userId',
				kebabCase: 'user-id'
			});
		});
	});
});
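
// A minimal sketch of a toKebabCase that satisfies the expectations above,
// splitting at lower-to-upper boundaries while keeping acronym runs like 'ID'
// together. Illustrative only - the real implementation is exported from
// scripts/modules/utils.js and may differ.
function toKebabCaseSketch(str) {
	return str.replace(/([a-z0-9])([A-Z]+)/g, '$1-$2').toLowerCase();
}

// toKebabCaseSketch('promptText') -> 'prompt-text'
// toKebabCaseSketch('userID')     -> 'user-id'
// toKebabCaseSketch('kebab-case') -> 'kebab-case'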
@@ -6,45 +6,45 @@ import { findTaskById } from '../../scripts/modules/utils.js';

import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';

describe('Task Finder', () => {
	describe('findTaskById function', () => {
		test('should find a task by numeric ID', () => {
			const task = findTaskById(sampleTasks.tasks, 2);
			expect(task).toBeDefined();
			expect(task.id).toBe(2);
			expect(task.title).toBe('Create Core Functionality');
		});

		test('should find a task by string ID', () => {
			const task = findTaskById(sampleTasks.tasks, '2');
			expect(task).toBeDefined();
			expect(task.id).toBe(2);
		});

		test('should find a subtask using dot notation', () => {
			const subtask = findTaskById(sampleTasks.tasks, '3.1');
			expect(subtask).toBeDefined();
			expect(subtask.id).toBe(1);
			expect(subtask.title).toBe('Create Header Component');
		});

		test('should return null for non-existent task ID', () => {
			const task = findTaskById(sampleTasks.tasks, 99);
			expect(task).toBeNull();
		});

		test('should return null for non-existent subtask ID', () => {
			const subtask = findTaskById(sampleTasks.tasks, '3.99');
			expect(subtask).toBeNull();
		});

		test('should return null for non-existent parent task ID in subtask notation', () => {
			const subtask = findTaskById(sampleTasks.tasks, '99.1');
			expect(subtask).toBeNull();
		});

		test('should return null when tasks array is empty', () => {
			const task = findTaskById(emptySampleTasks.tasks, 1);
			expect(task).toBeNull();
		});
	});
});
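
// A minimal sketch of the lookup behavior these tests pin down, including
// '3.1'-style dot notation for subtasks. Illustrative only - the real
// findTaskById is exported from scripts/modules/utils.js.
function findTaskByIdSketch(tasks, taskId) {
	if (!tasks || tasks.length === 0) return null;

	const idStr = String(taskId);
	if (idStr.includes('.')) {
		// Dot notation: '3.1' means subtask 1 of parent task 3
		const [parentId, subtaskId] = idStr.split('.').map(Number);
		const parent = tasks.find((t) => t.id === parentId);
		if (!parent || !parent.subtasks) return null;
		return parent.subtasks.find((st) => st.id === subtaskId) || null;
	}

	return tasks.find((t) => t.id === parseInt(idStr, 10)) || null;
}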
@@ -3,226 +3,228 @@
 */

import { jest } from '@jest/globals';
import {
	getStatusWithColor,
	formatDependenciesWithStatus,
	createProgressBar,
	getComplexityWithColor
} from '../../scripts/modules/ui.js';
import { sampleTasks } from '../fixtures/sample-tasks.js';

// Mock dependencies
jest.mock('chalk', () => {
	const origChalkFn = (text) => text;
	const chalk = origChalkFn;
	chalk.green = (text) => text; // Return text as-is for status functions
	chalk.yellow = (text) => text;
	chalk.red = (text) => text;
	chalk.cyan = (text) => text;
	chalk.blue = (text) => text;
	chalk.gray = (text) => text;
	chalk.white = (text) => text;
	chalk.bold = (text) => text;
	chalk.dim = (text) => text;

	// Add hex and other methods
	chalk.hex = () => origChalkFn;
	chalk.rgb = () => origChalkFn;

	return chalk;
});

jest.mock('figlet', () => ({
	textSync: jest.fn(() => 'Task Master Banner')
}));

jest.mock('boxen', () => jest.fn((text) => `[boxed: ${text}]`));

jest.mock('ora', () =>
	jest.fn(() => ({
		start: jest.fn(),
		succeed: jest.fn(),
		fail: jest.fn(),
		stop: jest.fn()
	}))
);

jest.mock('cli-table3', () =>
	jest.fn().mockImplementation(() => ({
		push: jest.fn(),
		toString: jest.fn(() => 'Table Content')
	}))
);

jest.mock('gradient-string', () => jest.fn(() => jest.fn((text) => text)));

jest.mock('../../scripts/modules/utils.js', () => ({
	CONFIG: {
		projectName: 'Test Project',
		projectVersion: '1.0.0'
	},
	log: jest.fn(),
	findTaskById: jest.fn(),
	readJSON: jest.fn(),
	readComplexityReport: jest.fn(),
	truncate: jest.fn((text) => text)
}));

jest.mock('../../scripts/modules/task-manager.js', () => ({
	findNextTask: jest.fn(),
	analyzeTaskComplexity: jest.fn()
}));

describe('UI Module', () => {
	beforeEach(() => {
		jest.clearAllMocks();
	});

	describe('getStatusWithColor function', () => {
		test('should return done status with emoji for console output', () => {
			const result = getStatusWithColor('done');
			expect(result).toMatch(/done/);
			expect(result).toContain('✅');
		});

		test('should return pending status with emoji for console output', () => {
			const result = getStatusWithColor('pending');
			expect(result).toMatch(/pending/);
			expect(result).toContain('⏱️');
		});

		test('should return deferred status with emoji for console output', () => {
			const result = getStatusWithColor('deferred');
			expect(result).toMatch(/deferred/);
			expect(result).toContain('⏱️');
		});

		test('should return in-progress status with emoji for console output', () => {
			const result = getStatusWithColor('in-progress');
			expect(result).toMatch(/in-progress/);
			expect(result).toContain('🔄');
		});

		test('should return unknown status with emoji for console output', () => {
			const result = getStatusWithColor('unknown');
			expect(result).toMatch(/unknown/);
			expect(result).toContain('❌');
		});

		test('should use simple icons when forTable is true', () => {
			const doneResult = getStatusWithColor('done', true);
			expect(doneResult).toMatch(/done/);
			expect(doneResult).toContain('✓');

			const pendingResult = getStatusWithColor('pending', true);
			expect(pendingResult).toMatch(/pending/);
			expect(pendingResult).toContain('○');

			const inProgressResult = getStatusWithColor('in-progress', true);
			expect(inProgressResult).toMatch(/in-progress/);
			expect(inProgressResult).toContain('►');

			const deferredResult = getStatusWithColor('deferred', true);
			expect(deferredResult).toMatch(/deferred/);
			expect(deferredResult).toContain('x');
		});
	});

	describe('formatDependenciesWithStatus function', () => {
		test('should format dependencies as plain IDs when forConsole is false (default)', () => {
			const dependencies = [1, 2, 3];
			const allTasks = [
				{ id: 1, status: 'done' },
				{ id: 2, status: 'pending' },
				{ id: 3, status: 'deferred' }
			];

			const result = formatDependenciesWithStatus(dependencies, allTasks);

			// With recent changes, we expect just plain IDs when forConsole is false
			expect(result).toBe('1, 2, 3');
		});

		test('should format dependencies with status indicators when forConsole is true', () => {
			const dependencies = [1, 2, 3];
			const allTasks = [
				{ id: 1, status: 'done' },
				{ id: 2, status: 'pending' },
				{ id: 3, status: 'deferred' }
			];

			const result = formatDependenciesWithStatus(dependencies, allTasks, true);

			// We can't test for exact color formatting due to our chalk mocks
			// Instead, test that the result contains all the expected IDs
			expect(result).toContain('1');
			expect(result).toContain('2');
			expect(result).toContain('3');

			// Test that it's a comma-separated list
			expect(result.split(', ').length).toBe(3);
		});

		test('should return "None" for empty dependencies', () => {
			const result = formatDependenciesWithStatus([], []);
			expect(result).toBe('None');
		});

		test('should handle missing tasks in the task list', () => {
			const dependencies = [1, 999];
			const allTasks = [{ id: 1, status: 'done' }];

			const result = formatDependenciesWithStatus(dependencies, allTasks);
			expect(result).toBe('1, 999 (Not found)');
		});
	});

	describe('createProgressBar function', () => {
		test('should create a progress bar with the correct percentage', () => {
			const result = createProgressBar(50, 10);
			expect(result).toBe('█████░░░░░ 50%');
		});

		test('should handle 0% progress', () => {
			const result = createProgressBar(0, 10);
			expect(result).toBe('░░░░░░░░░░ 0%');
		});

		test('should handle 100% progress', () => {
			const result = createProgressBar(100, 10);
			expect(result).toBe('██████████ 100%');
		});

		test('should handle invalid percentages by clamping', () => {
			const result1 = createProgressBar(0, 10); // -10 should clamp to 0
			expect(result1).toBe('░░░░░░░░░░ 0%');

			const result2 = createProgressBar(100, 10); // 150 should clamp to 100
			expect(result2).toBe('██████████ 100%');
		});
	});

	describe('getComplexityWithColor function', () => {
		test('should return high complexity in red', () => {
			const result = getComplexityWithColor(8);
			expect(result).toMatch(/8/);
			expect(result).toContain('🔴');
		});

		test('should return medium complexity in yellow', () => {
			const result = getComplexityWithColor(5);
			expect(result).toMatch(/5/);
			expect(result).toContain('🟡');
		});

		test('should return low complexity in green', () => {
			const result = getComplexityWithColor(3);
			expect(result).toMatch(/3/);
			expect(result).toContain('🟢');
		});

		test('should handle non-numeric inputs', () => {
			const result = getComplexityWithColor('high');
			expect(result).toMatch(/high/);
			expect(result).toContain('🔴');
		});
	});
});
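
// A minimal sketch matching the createProgressBar expectations above:
// clamp the percentage to [0, 100], fill proportionally, then append the
// percentage. Illustrative only - the real function lives in
// scripts/modules/ui.js.
function createProgressBarSketch(percent, length) {
	const clamped = Math.max(0, Math.min(100, percent));
	const filled = Math.round((clamped / 100) * length);
	return '█'.repeat(filled) + '░'.repeat(length - filled) + ` ${clamped}%`;
}

// createProgressBarSketch(50, 10) -> '█████░░░░░ 50%'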