feat: enhance commands with multi-subtask support, MCP integration, and update notifications

- Add support for comma-separated subtask IDs in remove-subtask command
- Implement MCP configuration in project initialization
- Add package update notification system with version comparison
- Improve command documentation with boolean flag conventions
- Add comprehensive error handling for unknown options
- Update help text with better examples and formatting
- Implement proper validation for command inputs
- Add global error handling patterns with helpful user messages
Eyal Toledano
2025-03-27 16:14:12 -04:00
parent 08d3f2db26
commit 1d807541ae
10 changed files with 956 additions and 64 deletions

View File

@@ -52,6 +52,28 @@ alwaysApply: false
> **Note**: Although options are defined with kebab-case (`--num-tasks`), Commander.js stores them internally as camelCase properties. Access them in code as `options.numTasks`, not `options['num-tasks']`.
- **Boolean Flag Conventions**:
- ✅ DO: Use positive flags with `--skip-` prefix for disabling behavior
- ❌ DON'T: Use negated boolean flags with `--no-` prefix
- ✅ DO: Use consistent flag handling across all commands
```javascript
// ✅ DO: Use positive flag with skip- prefix
.option('--skip-generate', 'Skip generating task files')
// ❌ DON'T: Use --no- prefix
.option('--no-generate', 'Skip generating task files')
```
> **Important**: When handling boolean flags in the code, make your intent clear:
```javascript
// ✅ DO: Use clear variable naming that matches the flag's intent
const generateFiles = !options.skipGenerate;
// ❌ DON'T: Use confusing double negatives
const dontSkipGenerate = !options.skipGenerate;
```
## Input Validation
- **Required Parameters**:
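  For example, a handler can fail fast when a required option is missing (a minimal sketch mirroring the `remove-subtask` handler later in this commit; `showCommandHelp` is illustrative):
```javascript
// ✅ DO: Validate required parameters before doing any work
.action(async (options) => {
  if (!options.id) {
    console.error(chalk.red('Error: --id parameter is required'));
    showCommandHelp();
    process.exit(1);
  }
  // ... proceed with the command
});
```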
@@ -143,6 +165,59 @@ alwaysApply: false
}
```
- **Unknown Options Handling**:
- ✅ DO: Provide clear error messages for unknown options
- ✅ DO: Show available options when an unknown option is used
- ✅ DO: Include command-specific help displays for common errors
- ❌ DON'T: Allow unknown options with `.allowUnknownOption()`
```javascript
// ✅ DO: Register global error handlers for unknown options
programInstance.on('option:unknown', function(unknownOption) {
const commandName = this._name || 'unknown';
console.error(chalk.red(`Error: Unknown option '${unknownOption}'`));
console.error(chalk.yellow(`Run 'task-master ${commandName} --help' to see available options`));
process.exit(1);
});
// ✅ DO: Add command-specific help displays
function showCommandHelp() {
console.log(boxen(
chalk.white.bold('Command Help') + '\n\n' +
chalk.cyan('Usage:') + '\n' +
` task-master command --option1=<value> [options]\n\n` +
chalk.cyan('Options:') + '\n' +
' --option1 <value> Description of option1 (required)\n' +
' --option2 <value> Description of option2\n\n' +
chalk.cyan('Examples:') + '\n' +
' task-master command --option1=value --option2=value',
{ padding: 1, borderColor: 'blue', borderStyle: 'round' }
));
}
```
- **Global Error Handling**:
- ✅ DO: Set up global error handlers for uncaught exceptions
- ✅ DO: Detect and format Commander-specific errors
- ✅ DO: Provide suitable guidance for fixing common errors
```javascript
// ✅ DO: Set up global error handlers with helpful messages
process.on('uncaughtException', (err) => {
// Handle Commander-specific errors
if (err.code === 'commander.unknownOption') {
const option = err.message.match(/'([^']+)'/)?.[1];
console.error(chalk.red(`Error: Unknown option '${option}'`));
console.error(chalk.yellow(`Run 'task-master <command> --help' to see available options`));
process.exit(1);
}
// Handle other error types...
console.error(chalk.red(`Error: ${err.message}`));
process.exit(1);
});
```
## Integration with Other Modules
- **Import Organization**:

View File

@@ -549,6 +549,9 @@ function createProjectStructure(projectName, projectDescription, projectVersion,
log('success', 'Created package.json');
}
// Setup MCP configuration for integration with Cursor
setupMCPConfiguration(targetDir, packageJson.name);
// Copy template files with replacements
const replacements = {
projectName,
@@ -661,6 +664,84 @@ function createProjectStructure(projectName, projectDescription, projectVersion,
));
}
// Function to setup MCP configuration for Cursor integration
function setupMCPConfiguration(targetDir, projectName) {
const mcpDirPath = path.join(targetDir, '.cursor');
const mcpJsonPath = path.join(mcpDirPath, 'mcp.json');
log('info', 'Setting up MCP configuration for Cursor integration...');
// Create .cursor directory if it doesn't exist
ensureDirectoryExists(mcpDirPath);
// New MCP config to be added - references the installed package
const newMCPServer = {
"task-master-ai": {
"command": "npx",
"args": [
"task-master-ai",
"mcp-server"
]
}
};
// Check if mcp.json already exists
if (fs.existsSync(mcpJsonPath)) {
log('info', 'MCP configuration file already exists, updating...');
try {
// Read existing config
const mcpConfig = JSON.parse(fs.readFileSync(mcpJsonPath, 'utf8'));
// Initialize mcpServers if it doesn't exist
if (!mcpConfig.mcpServers) {
mcpConfig.mcpServers = {};
}
// Add the task-master-ai server if it doesn't exist
if (!mcpConfig.mcpServers["task-master-ai"]) {
mcpConfig.mcpServers["task-master-ai"] = newMCPServer["task-master-ai"];
log('info', 'Added task-master-ai server to existing MCP configuration');
} else {
log('info', 'task-master-ai server already configured in mcp.json');
}
// Write the updated configuration
fs.writeFileSync(
mcpJsonPath,
JSON.stringify(mcpConfig, null, 4)
);
log('success', 'Updated MCP configuration file');
} catch (error) {
log('error', `Failed to update MCP configuration: ${error.message}`);
// Create a backup before potentially modifying
const backupPath = `${mcpJsonPath}.backup-${Date.now()}`;
if (fs.existsSync(mcpJsonPath)) {
fs.copyFileSync(mcpJsonPath, backupPath);
log('info', `Created backup of existing mcp.json at ${backupPath}`);
}
// Create new configuration
const newMCPConfig = {
"mcpServers": newMCPServer
};
fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
log('warn', 'Created new MCP configuration file (backup of original file was created if it existed)');
}
} else {
// If mcp.json doesn't exist, create it
const newMCPConfig = {
"mcpServers": newMCPServer
};
fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
log('success', 'Created MCP configuration file for Cursor integration');
}
// Add note to console about MCP integration
log('info', 'MCP server will use the installed task-master-ai package');
}
// Run the initialization if this script is executed directly
// The original check doesn't work with npx and global commands
// if (process.argv[1] === fileURLToPath(import.meta.url)) {

View File

@@ -8,6 +8,7 @@ import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import fs from 'fs';
import https from 'https';
import { CONFIG, log, readJSON } from './utils.js';
import {
@@ -638,22 +639,33 @@ function registerCommands(programInstance) {
.command('remove-subtask')
.description('Remove a subtask from its parent task')
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
.option('-i, --id <id>', 'Subtask ID to remove in format "parentId.subtaskId" (required)')
.option('-i, --id <id>', 'Subtask ID(s) to remove in format "parentId.subtaskId" (can be comma-separated for multiple subtasks)')
.option('-c, --convert', 'Convert the subtask to a standalone task instead of deleting it')
.option('--skip-generate', 'Skip regenerating task files')
.action(async (options) => {
const tasksPath = options.file;
const subtaskId = options.id;
const subtaskIds = options.id;
const convertToTask = options.convert || false;
const generateFiles = !options.skipGenerate;
if (!subtaskId) {
console.error(chalk.red('Error: --id parameter is required. Please provide a subtask ID in format "parentId.subtaskId".'));
if (!subtaskIds) {
console.error(chalk.red('Error: --id parameter is required. Please provide subtask ID(s) in format "parentId.subtaskId".'));
showRemoveSubtaskHelp();
process.exit(1);
}
try {
// Split by comma to support multiple subtask IDs
const subtaskIdArray = subtaskIds.split(',').map(id => id.trim());
for (const subtaskId of subtaskIdArray) {
// Validate subtask ID format
if (!subtaskId.includes('.')) {
console.error(chalk.red(`Error: Subtask ID "${subtaskId}" must be in format "parentId.subtaskId"`));
showRemoveSubtaskHelp();
process.exit(1);
}
console.log(chalk.blue(`Removing subtask ${subtaskId}...`));
if (convertToTask) {
console.log(chalk.blue('The subtask will be converted to a standalone task'));
@@ -681,6 +693,7 @@ function registerCommands(programInstance) {
{ padding: 1, borderColor: 'green', borderStyle: 'round', margin: { top: 1 } }
));
}
}
} catch (error) {
console.error(chalk.red(`Error: ${error.message}`));
showRemoveSubtaskHelp();
@@ -700,12 +713,13 @@ function registerCommands(programInstance) {
chalk.cyan('Usage:') + '\n' +
` task-master remove-subtask --id=<parentId.subtaskId> [options]\n\n` +
chalk.cyan('Options:') + '\n' +
' -i, --id <id> Subtask ID to remove in format "parentId.subtaskId" (required)\n' +
' -i, --id <id> Subtask ID(s) to remove in format "parentId.subtaskId" (can be comma-separated, required)\n' +
' -c, --convert Convert the subtask to a standalone task instead of deleting it\n' +
' -f, --file <file> Path to the tasks file (default: "tasks/tasks.json")\n' +
' --skip-generate Skip regenerating task files\n\n' +
chalk.cyan('Examples:') + '\n' +
' task-master remove-subtask --id=5.2\n' +
' task-master remove-subtask --id=5.2,6.3,7.1\n' +
' task-master remove-subtask --id=5.2 --convert',
{ padding: 1, borderColor: 'blue', borderStyle: 'round' }
));
@@ -783,6 +797,132 @@ function setupCLI() {
return programInstance;
}
/**
* Check for newer version of task-master-ai
* @returns {Promise<{currentVersion: string, latestVersion: string, needsUpdate: boolean}>}
*/
async function checkForUpdate() {
// Get current version from package.json
let currentVersion = CONFIG.projectVersion;
try {
// Try to get the version from the installed package
const packageJsonPath = path.join(process.cwd(), 'node_modules', 'task-master-ai', 'package.json');
if (fs.existsSync(packageJsonPath)) {
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
currentVersion = packageJson.version;
}
} catch (error) {
// Silently fail and use default
log('debug', `Error reading current package version: ${error.message}`);
}
return new Promise((resolve) => {
// Get the latest version from npm registry
const options = {
hostname: 'registry.npmjs.org',
path: '/task-master-ai',
method: 'GET',
headers: {
'Accept': 'application/vnd.npm.install-v1+json' // Lightweight response
}
};
const req = https.request(options, (res) => {
let data = '';
res.on('data', (chunk) => {
data += chunk;
});
res.on('end', () => {
try {
const npmData = JSON.parse(data);
const latestVersion = npmData['dist-tags']?.latest || currentVersion;
// Compare versions
const needsUpdate = compareVersions(currentVersion, latestVersion) < 0;
resolve({
currentVersion,
latestVersion,
needsUpdate
});
} catch (error) {
log('debug', `Error parsing npm response: ${error.message}`);
resolve({
currentVersion,
latestVersion: currentVersion,
needsUpdate: false
});
}
});
});
req.on('error', (error) => {
log('debug', `Error checking for updates: ${error.message}`);
resolve({
currentVersion,
latestVersion: currentVersion,
needsUpdate: false
});
});
// Set a timeout to avoid hanging if npm is slow
req.setTimeout(3000, () => {
req.abort();
log('debug', 'Update check timed out');
resolve({
currentVersion,
latestVersion: currentVersion,
needsUpdate: false
});
});
req.end();
});
}
/**
* Compare semantic versions
* @param {string} v1 - First version
* @param {string} v2 - Second version
* @returns {number} -1 if v1 < v2, 0 if v1 = v2, 1 if v1 > v2
*/
function compareVersions(v1, v2) {
const v1Parts = v1.split('.').map(p => parseInt(p, 10));
const v2Parts = v2.split('.').map(p => parseInt(p, 10));
for (let i = 0; i < Math.max(v1Parts.length, v2Parts.length); i++) {
const v1Part = v1Parts[i] || 0;
const v2Part = v2Parts[i] || 0;
if (v1Part < v2Part) return -1;
if (v1Part > v2Part) return 1;
}
return 0;
}
/**
* Display upgrade notification message
* @param {string} currentVersion - Current version
* @param {string} latestVersion - Latest version
*/
function displayUpgradeNotification(currentVersion, latestVersion) {
const message = boxen(
`${chalk.blue.bold('Update Available!')} ${chalk.dim(currentVersion)} → ${chalk.green(latestVersion)}\n\n` +
`Run ${chalk.cyan('npm i task-master-ai@latest -g')} to update to the latest version with new features and bug fixes.`,
{
padding: 1,
margin: { top: 1, bottom: 1 },
borderColor: 'yellow',
borderStyle: 'round'
}
);
console.log(message);
}
/**
* Parse arguments and run the CLI
* @param {Array} argv - Command-line arguments
@@ -800,9 +940,18 @@ async function runCLI(argv = process.argv) {
process.exit(0);
}
// Start the update check in the background - don't await yet
const updateCheckPromise = checkForUpdate();
// Setup and parse
const programInstance = setupCLI();
await programInstance.parseAsync(argv);
// After command execution, check if an update is available
const updateInfo = await updateCheckPromise;
if (updateInfo.needsUpdate) {
displayUpgradeNotification(updateInfo.currentVersion, updateInfo.latestVersion);
}
} catch (error) {
console.error(chalk.red(`Error: ${error.message}`));
@@ -817,5 +966,8 @@ async function runCLI(argv = process.argv) {
export {
registerCommands,
setupCLI,
runCLI
runCLI,
checkForUpdate,
compareVersions,
displayUpgradeNotification
};

View File

@@ -20,39 +20,123 @@ This task involves completing the Model Context Protocol (MCP) server implementa
The implementation must ensure compatibility with existing MCP clients and follow RESTful API design principles, while supporting concurrent requests and maintaining robust error handling.
# Test Strategy:
Testing for the updated MCP server functionality should include:
Testing for the MCP server implementation will follow a comprehensive approach based on our established testing guidelines:
1. Unit tests:
- Validate direct function imports for Task Master tools, replacing CLI-based execution.
- Test updated authentication and authorization mechanisms.
- Verify context management operations (CRUD, metadata, windowing).
- Test caching mechanisms for frequently accessed contexts.
- Validate proper tool registration with descriptions and parameters.
## Test Organization
2. Integration tests:
- Test the MCP server with FastMCP's stdio transport mode.
- Verify end-to-end request/response cycles for each endpoint.
- Ensure compatibility with the ModelContextProtocol SDK.
- Test the tool registration process in `tools/index.js` for correctness and efficiency.
1. **Unit Tests** (`tests/unit/mcp-server/`):
- Test individual MCP server components in isolation
- Mock all external dependencies including FastMCP SDK
- Test each tool implementation separately
- Verify direct function imports work correctly
- Test context management and caching mechanisms
- Example files: `context-manager.test.js`, `tool-registration.test.js`, `direct-imports.test.js`
3. Performance tests:
- Benchmark response times for context operations with large datasets.
- Test caching mechanisms and concurrent request handling.
- Measure memory usage and server stability under load.
2. **Integration Tests** (`tests/integration/mcp-server/`):
- Test interactions between MCP server components
- Verify proper tool registration with FastMCP
- Test context flow between components
- Validate error handling across module boundaries
- Example files: `server-tool-integration.test.js`, `context-flow.test.js`
4. Security tests:
- Validate the robustness of authentication/authorization mechanisms.
- Test for vulnerabilities such as injection attacks, CSRF, and unauthorized access.
3. **End-to-End Tests** (`tests/e2e/mcp-server/`):
- Test complete MCP server workflows
- Verify server instantiation via different methods (direct, npx, global install)
- Test actual stdio communication with mock clients
- Example files: `server-startup.e2e.test.js`, `client-communication.e2e.test.js`
5. Deployment tests:
- Verify proper server instantiation and operation when installed via `npx` or `npm i -g`.
- Test configuration loading from `mcp.json`.
4. **Test Fixtures** (`tests/fixtures/mcp-server/`):
- Sample context data
- Mock tool definitions
- Sample MCP requests and responses
6. Documentation validation:
- Ensure all examples in the documentation are accurate and functional.
- Verify manual testing workflows using tools like curl or Postman.
## Testing Approach
All tests should be automated and integrated into the CI/CD pipeline to ensure consistent quality.
### Module Mocking Strategy
```javascript
// Mock the FastMCP SDK
jest.mock('@model-context-protocol/sdk', () => ({
MCPServer: jest.fn().mockImplementation(() => ({
registerTool: jest.fn(),
registerResource: jest.fn(),
start: jest.fn().mockResolvedValue(undefined),
stop: jest.fn().mockResolvedValue(undefined)
})),
MCPError: jest.fn().mockImplementation(function(message, code) {
this.message = message;
this.code = code;
})
}));
// Import modules after mocks
import { MCPServer, MCPError } from '@model-context-protocol/sdk';
import { initMCPServer } from '../../scripts/mcp-server.js';
```
### Context Management Testing
- Test context creation, retrieval, and manipulation
- Verify caching mechanisms work correctly
- Test context windowing and metadata handling
- Validate context persistence across server restarts
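A unit test for the caching behavior above might look like this (a sketch only; `ContextManager`, its options, and its module path are assumptions, not the actual implementation):
```javascript
import { jest, test, expect } from '@jest/globals';
// Hypothetical module path; the real context manager may live elsewhere
import { ContextManager } from '../../mcp-server/context-manager.js';

test('serves cached context within TTL without reloading', async () => {
  const manager = new ContextManager({ ttl: 60000 }); // 60s TTL (assumed option)
  const loader = jest.fn().mockResolvedValue({ tasks: [] });

  await manager.get('tasks-context', loader); // first call populates the cache
  await manager.get('tasks-context', loader); // second call should hit the cache

  expect(loader).toHaveBeenCalledTimes(1);
});
```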
### Direct Function Import Testing
- Verify Task Master functions are imported correctly
- Test performance improvements compared to CLI execution
- Validate error handling with direct imports
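For instance, a direct-import test might assert that the imported function returns structured data rather than text that needs parsing (hedged: `listTasks` and its signature are assumptions for illustration):
```javascript
import { test, expect } from '@jest/globals';
// Direct import replaces spawning `task-master list` via child_process
import { listTasks } from '../../scripts/modules/task-manager.js';

test('direct import returns structured data instead of CLI text', async () => {
  const result = await listTasks('tests/fixtures/tasks.json');
  expect(Array.isArray(result.tasks)).toBe(true); // no stdout parsing needed
});
```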
### Tool Registration Testing
- Verify tools are registered with proper descriptions and parameters
- Test decorator-based registration patterns
- Validate tool execution with different input types
### Error Handling Testing
- Test all error paths with appropriate MCPError types
- Verify error propagation to clients
- Test recovery from various error conditions
### Performance Testing
- Benchmark response times with and without caching
- Test memory usage under load
- Verify concurrent request handling
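A simple harness along these lines could drive the cache benchmarks (sketch; `manager` and `loader` are the hypothetical objects from the caching test above):
```javascript
async function benchmark(label, fn, iterations = 100) {
  const start = process.hrtime.bigint();
  for (let i = 0; i < iterations; i++) {
    await fn();
  }
  const avgMs = Number(process.hrtime.bigint() - start) / 1e6 / iterations;
  console.log(`${label}: ${avgMs.toFixed(2)}ms average over ${iterations} runs`);
}

// Usage (hypothetical):
// await benchmark('cached context read', () => manager.get('tasks-context', loader));
```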
## Test Quality Guidelines
- Follow TDD approach when possible
- Maintain test independence and isolation
- Use descriptive test names explaining expected behavior
- Aim for 80%+ code coverage, with critical paths at 100%
- Follow the mock-first-then-import pattern for all Jest mocks
- Avoid testing implementation details that might change
- Ensure tests don't depend on execution order
## Specific Test Cases
1. **Server Initialization**
- Test server creation with various configuration options
- Verify proper tool and resource registration
- Test server startup and shutdown procedures
2. **Context Operations**
- Test context creation, retrieval, update, and deletion
- Verify context windowing and truncation
- Test context metadata and tagging
3. **Tool Execution**
- Test each tool with various input parameters
- Verify proper error handling for invalid inputs
- Test tool execution performance
4. **MCP.json Integration**
- Test creation and updating of .cursor/mcp.json
- Verify proper server registration in mcp.json
- Test handling of existing mcp.json files
5. **Transport Handling**
- Test stdio communication
- Verify proper message formatting
- Test error handling in transport layer
All tests will be automated and integrated into the CI/CD pipeline to ensure consistent quality.
# Subtasks:
## 1. Create Core MCP Server Module and Basic Structure [done]
@@ -122,7 +206,7 @@ Testing approach:
- Test error handling with invalid inputs
- Benchmark endpoint performance
## 6. Refactor MCP Server to Leverage ModelContextProtocol SDK [pending]
## 6. Refactor MCP Server to Leverage ModelContextProtocol SDK [deferred]
### Dependencies: 23.1, 23.2, 23.3
### Description: Integrate the ModelContextProtocol SDK directly into the MCP server implementation to streamline tool registration and resource handling.
### Details:
@@ -138,7 +222,7 @@ Testing approach:
- Validate compatibility with existing MCP clients.
- Benchmark performance improvements from SDK integration.
## 8. Implement Direct Function Imports and Replace CLI-based Execution [pending]
## 8. Implement Direct Function Imports and Replace CLI-based Execution [in-progress]
### Dependencies: None
### Description: Refactor the MCP server implementation to use direct Task Master function imports instead of the current CLI-based execution using child_process.spawnSync. This will improve performance, reliability, and enable better error handling.
### Details:
@@ -149,7 +233,7 @@ Testing approach:
5. Add unit tests to verify the function imports work correctly
6. Test performance improvements by comparing response times between CLI and function import approaches
## 9. Implement Context Management and Caching Mechanisms [pending]
## 9. Implement Context Management and Caching Mechanisms [deferred]
### Dependencies: 23.1
### Description: Enhance the MCP server with proper context management and caching to improve performance and user experience, especially for frequently accessed data and contexts.
### Details:
@@ -193,3 +277,15 @@ Testing approach:
### Details:
1. Set up Jest testing framework with proper configuration
2. Create MCPTestClient for testing FastMCP server interaction
3. Implement unit tests for individual tool functions
4. Create integration tests for end-to-end request/response cycles
5. Set up test fixtures and mock data
6. Implement test coverage reporting
7. Document testing guidelines and examples
## 14. Add MCP.json to the Init Workflow [done]
### Dependencies: 23.1, 23.3
### Description: Implement functionality to create or update .cursor/mcp.json during project initialization, handling cases where: 1) If there's no mcp.json, create it with the appropriate configuration; 2) If there is an mcp.json, intelligently append to it without syntax errors like trailing commas
### Details:
1. Create functionality to detect if .cursor/mcp.json exists in the project
2. Implement logic to create a new mcp.json file with proper structure if it doesn't exist
3. Add functionality to read and parse existing mcp.json if it exists
4. Create method to add a new taskmaster-ai server entry to the mcpServers object
5. Implement intelligent JSON merging that avoids trailing commas and syntax errors
6. Ensure proper formatting and indentation in the generated/updated JSON
7. Add validation to verify the updated configuration is valid JSON
8. Include this functionality in the init workflow
9. Add error handling for file system operations and JSON parsing
10. Document the mcp.json structure and integration process
## 15. Implement SSE Support for Real-time Updates [deferred]
### Dependencies: 23.1, 23.3, 23.11
### Description: Add Server-Sent Events (SSE) capabilities to the MCP server to enable real-time updates and streaming of task execution progress, logs, and status changes to clients
### Details:
1. Research and implement SSE protocol for the MCP server
2. Create dedicated SSE endpoints for event streaming
3. Implement event emitter pattern for internal event management
4. Add support for different event types (task status, logs, errors)
5. Implement client connection management with proper keep-alive handling
6. Add filtering capabilities to allow subscribing to specific event types
7. Create in-memory event buffer for clients reconnecting
8. Document SSE endpoint usage and client implementation examples
9. Add robust error handling for dropped connections
10. Implement rate limiting and backpressure mechanisms
11. Add authentication for SSE connections

tasks/task_038.txt Normal file
View File

@@ -0,0 +1,56 @@
# Task ID: 38
# Title: Implement Version Check System with Upgrade Notifications
# Status: done
# Dependencies: None
# Priority: high
# Description: Create a system that checks for newer package versions and displays upgrade notifications when users run any command, informing them to update to the latest version.
# Details:
Implement a version check mechanism that runs automatically with every command execution:
1. Create a new module (e.g., `versionChecker.js`) that will:
- Fetch the latest version from npm registry using the npm registry API (https://registry.npmjs.org/task-master-ai/latest)
- Compare it with the current installed version (from package.json)
- Store the last check timestamp to avoid excessive API calls (check once per day)
- Cache the result to minimize network requests
2. The notification should:
- Use colored text (e.g., yellow background with black text) to be noticeable
- Include the current version and latest version
- Show the exact upgrade command: 'npm i task-master-ai@latest'
- Be displayed at the beginning or end of command output, not interrupting the main content
- Include a small separator line to distinguish it from command output
3. Implementation considerations:
- Handle network failures gracefully (don't block command execution if version check fails)
- Add a configuration option to disable update checks if needed
- Ensure the check is lightweight and doesn't significantly impact command performance
- Consider using a package like 'semver' for proper version comparison
- Implement a cooldown period (e.g., only check once per day) to avoid excessive API calls (see the sketch after this list)
4. The version check should be integrated into the main command execution flow so it runs for all commands automatically.
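A possible shape for the once-per-day cooldown described in point 3 (a sketch only; the `checkForUpdate` shipped in `commands.js` does not yet persist a timestamp, and the stamp-file location is an assumption):
```javascript
import fs from 'fs';
import os from 'os';
import path from 'path';

const CHECK_INTERVAL_MS = 24 * 60 * 60 * 1000; // at most one check per day
const stampPath = path.join(os.homedir(), '.task-master-update-check'); // assumed location

function shouldCheckForUpdate() {
  try {
    const lastCheck = Number(fs.readFileSync(stampPath, 'utf8'));
    if (Date.now() - lastCheck < CHECK_INTERVAL_MS) return false;
  } catch {
    // No stamp yet (or unreadable): fall through and check
  }
  fs.writeFileSync(stampPath, String(Date.now()));
  return true;
}
```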
# Test Strategy:
1. Manual testing:
- Install an older version of the package
- Run various commands and verify the update notification appears
- Update to the latest version and confirm the notification no longer appears
- Test with network disconnected to ensure graceful handling of failures
2. Unit tests:
- Mock the npm registry response to test different scenarios (a possible mock is sketched below):
- When a newer version exists
- When using the latest version
- When the registry is unavailable
- Test the version comparison logic with various version strings
- Test the cooldown/caching mechanism works correctly
3. Integration tests:
- Create a test that runs a command and verifies the notification appears in the expected format
- Test that the notification appears for all commands
- Verify the notification doesn't interfere with normal command output
4. Edge cases to test:
- Pre-release versions (alpha/beta)
- Very old versions
- When package.json is missing or malformed
- When npm registry returns unexpected data
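One way to mock the registry for the unit tests above (a sketch using Jest's ESM module mocking; the payload and import path are assumptions):
```javascript
import { jest, test, expect } from '@jest/globals';
import { EventEmitter } from 'events';

jest.unstable_mockModule('https', () => ({
  default: {
    request: jest.fn((options, cb) => {
      const res = new EventEmitter();
      return Object.assign(new EventEmitter(), {
        setTimeout: jest.fn(),
        abort: jest.fn(),
        end: () => {
          cb(res); // attach the caller's data/end handlers first
          res.emit('data', JSON.stringify({ 'dist-tags': { latest: '1.2.3' } }));
          res.emit('end');
        }
      });
    })
  }
}));

// Mock first, then import (per the repo's testing guidelines)
const { checkForUpdate } = await import('../../scripts/modules/commands.js');

test('reports an update when the registry advertises a newer version', async () => {
  const info = await checkForUpdate();
  expect(info.latestVersion).toBe('1.2.3');
});
```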

View File

@@ -1344,7 +1344,7 @@
],
"priority": "medium",
"details": "This task involves completing the Model Context Protocol (MCP) server implementation for Task Master using FastMCP. Key updates include:\n\n1. Transition from CLI-based execution (currently using `child_process.spawnSync`) to direct Task Master function imports for improved performance and reliability.\n2. Implement caching mechanisms for frequently accessed contexts to enhance performance, leveraging FastMCP's efficient transport mechanisms (e.g., stdio).\n3. Refactor context management to align with best practices for handling large context windows, metadata, and tagging.\n4. Refactor tool registration in `tools/index.js` to include clear descriptions and parameter definitions, leveraging FastMCP's decorator-based patterns for better integration.\n5. Enhance transport type handling to ensure proper stdio communication and compatibility with FastMCP.\n6. Ensure the MCP server can be instantiated and run correctly when installed globally via `npx` or `npm i -g`.\n7. Integrate the ModelContextProtocol SDK directly to streamline resource and tool registration, ensuring compatibility with FastMCP's transport mechanisms.\n8. Identify and address missing components or functionalities to meet FastMCP best practices, such as robust error handling, monitoring endpoints, and concurrency support.\n9. Update documentation to include examples of using the MCP server with FastMCP, detailed setup instructions, and client integration guides.\n\nThe implementation must ensure compatibility with existing MCP clients and follow RESTful API design principles, while supporting concurrent requests and maintaining robust error handling.",
"testStrategy": "Testing for the updated MCP server functionality should include:\n\n1. Unit tests:\n - Validate direct function imports for Task Master tools, replacing CLI-based execution.\n - Test updated authentication and authorization mechanisms.\n - Verify context management operations (CRUD, metadata, windowing).\n - Test caching mechanisms for frequently accessed contexts.\n - Validate proper tool registration with descriptions and parameters.\n\n2. Integration tests:\n - Test the MCP server with FastMCP's stdio transport mode.\n - Verify end-to-end request/response cycles for each endpoint.\n - Ensure compatibility with the ModelContextProtocol SDK.\n - Test the tool registration process in `tools/index.js` for correctness and efficiency.\n\n3. Performance tests:\n - Benchmark response times for context operations with large datasets.\n - Test caching mechanisms and concurrent request handling.\n - Measure memory usage and server stability under load.\n\n4. Security tests:\n - Validate the robustness of authentication/authorization mechanisms.\n - Test for vulnerabilities such as injection attacks, CSRF, and unauthorized access.\n\n5. Deployment tests:\n - Verify proper server instantiation and operation when installed via `npx` or `npm i -g`.\n - Test configuration loading from `mcp.json`.\n\n6. Documentation validation:\n - Ensure all examples in the documentation are accurate and functional.\n - Verify manual testing workflows using tools like curl or Postman.\n\nAll tests should be automated and integrated into the CI/CD pipeline to ensure consistent quality.",
"testStrategy": "Testing for the MCP server implementation will follow a comprehensive approach based on our established testing guidelines:\n\n## Test Organization\n\n1. **Unit Tests** (`tests/unit/mcp-server/`):\n - Test individual MCP server components in isolation\n - Mock all external dependencies including FastMCP SDK\n - Test each tool implementation separately\n - Verify direct function imports work correctly\n - Test context management and caching mechanisms\n - Example files: `context-manager.test.js`, `tool-registration.test.js`, `direct-imports.test.js`\n\n2. **Integration Tests** (`tests/integration/mcp-server/`):\n - Test interactions between MCP server components\n - Verify proper tool registration with FastMCP\n - Test context flow between components\n - Validate error handling across module boundaries\n - Example files: `server-tool-integration.test.js`, `context-flow.test.js`\n\n3. **End-to-End Tests** (`tests/e2e/mcp-server/`):\n - Test complete MCP server workflows\n - Verify server instantiation via different methods (direct, npx, global install)\n - Test actual stdio communication with mock clients\n - Example files: `server-startup.e2e.test.js`, `client-communication.e2e.test.js`\n\n4. **Test Fixtures** (`tests/fixtures/mcp-server/`):\n - Sample context data\n - Mock tool definitions\n - Sample MCP requests and responses\n\n## Testing Approach\n\n### Module Mocking Strategy\n```javascript\n// Mock the FastMCP SDK\njest.mock('@model-context-protocol/sdk', () => ({\n MCPServer: jest.fn().mockImplementation(() => ({\n registerTool: jest.fn(),\n registerResource: jest.fn(),\n start: jest.fn().mockResolvedValue(undefined),\n stop: jest.fn().mockResolvedValue(undefined)\n })),\n MCPError: jest.fn().mockImplementation(function(message, code) {\n this.message = message;\n this.code = code;\n })\n}));\n\n// Import modules after mocks\nimport { MCPServer, MCPError } from '@model-context-protocol/sdk';\nimport { initMCPServer } from '../../scripts/mcp-server.js';\n```\n\n### Context Management Testing\n- Test context creation, retrieval, and manipulation\n- Verify caching mechanisms work correctly\n- Test context windowing and metadata handling\n- Validate context persistence across server restarts\n\n### Direct Function Import Testing\n- Verify Task Master functions are imported correctly\n- Test performance improvements compared to CLI execution\n- Validate error handling with direct imports\n\n### Tool Registration Testing\n- Verify tools are registered with proper descriptions and parameters\n- Test decorator-based registration patterns\n- Validate tool execution with different input types\n\n### Error Handling Testing\n- Test all error paths with appropriate MCPError types\n- Verify error propagation to clients\n- Test recovery from various error conditions\n\n### Performance Testing\n- Benchmark response times with and without caching\n- Test memory usage under load\n- Verify concurrent request handling\n\n## Test Quality Guidelines\n\n- Follow TDD approach when possible\n- Maintain test independence and isolation\n- Use descriptive test names explaining expected behavior\n- Aim for 80%+ code coverage, with critical paths at 100%\n- Follow the mock-first-then-import pattern for all Jest mocks\n- Avoid testing implementation details that might change\n- Ensure tests don't depend on execution order\n\n## Specific Test Cases\n\n1. 
**Server Initialization**\n - Test server creation with various configuration options\n - Verify proper tool and resource registration\n - Test server startup and shutdown procedures\n\n2. **Context Operations**\n - Test context creation, retrieval, update, and deletion\n - Verify context windowing and truncation\n - Test context metadata and tagging\n\n3. **Tool Execution**\n - Test each tool with various input parameters\n - Verify proper error handling for invalid inputs\n - Test tool execution performance\n\n4. **MCP.json Integration**\n - Test creation and updating of .cursor/mcp.json\n - Verify proper server registration in mcp.json\n - Test handling of existing mcp.json files\n\n5. **Transport Handling**\n - Test stdio communication\n - Verify proper message formatting\n - Test error handling in transport layer\n\nAll tests will be automated and integrated into the CI/CD pipeline to ensure consistent quality.",
"subtasks": [
{
"id": 1,
@@ -1388,7 +1388,7 @@
3
],
"details": "Implementation steps:\n1. Replace manual tool registration with ModelContextProtocol SDK methods.\n2. Use SDK utilities to simplify resource and template management.\n3. Ensure compatibility with FastMCP's transport mechanisms.\n4. Update server initialization to include SDK-based configurations.\n\nTesting approach:\n- Verify SDK integration with all MCP endpoints.\n- Test resource and template registration using SDK methods.\n- Validate compatibility with existing MCP clients.\n- Benchmark performance improvements from SDK integration.",
"status": "pending",
"status": "deferred",
"parentTaskId": 23
},
{
@@ -1397,7 +1397,7 @@
"description": "Refactor the MCP server implementation to use direct Task Master function imports instead of the current CLI-based execution using child_process.spawnSync. This will improve performance, reliability, and enable better error handling.",
"dependencies": [],
"details": "1. Create a new module to import and expose Task Master core functions directly\n2. Modify tools/utils.js to remove executeTaskMasterCommand and replace with direct function calls\n3. Update each tool implementation (listTasks.js, showTask.js, etc.) to use the direct function imports\n4. Implement proper error handling with try/catch blocks and FastMCP's MCPError\n5. Add unit tests to verify the function imports work correctly\n6. Test performance improvements by comparing response times between CLI and function import approaches",
"status": "pending",
"status": "in-progress",
"parentTaskId": 23
},
{
@@ -1408,7 +1408,7 @@
1
],
"details": "1. Implement a context manager class that leverages FastMCP's Context object\n2. Add caching for frequently accessed task data with configurable TTL settings\n3. Implement context tagging for better organization of context data\n4. Add methods to efficiently handle large context windows\n5. Create helper functions for storing and retrieving context data\n6. Implement cache invalidation strategies for task updates\n7. Add cache statistics for monitoring performance\n8. Create unit tests for context management and caching functionality",
"status": "pending",
"status": "deferred",
"parentTaskId": 23
},
{
@@ -1458,6 +1458,31 @@
"23.8"
],
"parentTaskId": 23
},
{
"id": 14,
"title": "Add MCP.json to the Init Workflow",
"description": "Implement functionality to create or update .cursor/mcp.json during project initialization, handling cases where: 1) If there's no mcp.json, create it with the appropriate configuration; 2) If there is an mcp.json, intelligently append to it without syntax errors like trailing commas",
"details": "1. Create functionality to detect if .cursor/mcp.json exists in the project\\n2. Implement logic to create a new mcp.json file with proper structure if it doesn't exist\\n3. Add functionality to read and parse existing mcp.json if it exists\\n4. Create method to add a new taskmaster-ai server entry to the mcpServers object\\n5. Implement intelligent JSON merging that avoids trailing commas and syntax errors\\n6. Ensure proper formatting and indentation in the generated/updated JSON\\n7. Add validation to verify the updated configuration is valid JSON\\n8. Include this functionality in the init workflow\\n9. Add error handling for file system operations and JSON parsing\\n10. Document the mcp.json structure and integration process",
"status": "done",
"dependencies": [
"23.1",
"23.3"
],
"parentTaskId": 23
},
{
"id": 15,
"title": "Implement SSE Support for Real-time Updates",
"description": "Add Server-Sent Events (SSE) capabilities to the MCP server to enable real-time updates and streaming of task execution progress, logs, and status changes to clients",
"details": "1. Research and implement SSE protocol for the MCP server\\n2. Create dedicated SSE endpoints for event streaming\\n3. Implement event emitter pattern for internal event management\\n4. Add support for different event types (task status, logs, errors)\\n5. Implement client connection management with proper keep-alive handling\\n6. Add filtering capabilities to allow subscribing to specific event types\\n7. Create in-memory event buffer for clients reconnecting\\n8. Document SSE endpoint usage and client implementation examples\\n9. Add robust error handling for dropped connections\\n10. Implement rate limiting and backpressure mechanisms\\n11. Add authentication for SSE connections",
"status": "deferred",
"dependencies": [
"23.1",
"23.3",
"23.11"
],
"parentTaskId": 23
}
]
},
@@ -1884,6 +1909,16 @@
"priority": "medium",
"details": "This task involves integrating Google's Gemini API across all main AI services that currently use Claude:\n\n1. Create a new GeminiService class that implements the same interface as the existing ClaudeService\n2. Implement authentication and API key management for Gemini API\n3. Map our internal prompt formats to Gemini's expected input format\n4. Handle Gemini-specific parameters (temperature, top_p, etc.) and response parsing\n5. Update the AI service factory/provider to support selecting Gemini as an alternative\n6. Add configuration options in settings to allow users to select Gemini as their preferred provider\n7. Implement proper error handling for Gemini-specific API errors\n8. Ensure streaming responses are properly supported if Gemini offers this capability\n9. Update documentation to reflect the new Gemini option\n10. Consider implementing model selection if Gemini offers multiple models (e.g., Gemini Pro, Gemini Ultra)\n11. Ensure all existing AI capabilities (summarization, code generation, etc.) maintain feature parity when using Gemini\n\nThe implementation should follow the same pattern as the recent Ollama integration (Task #36) to maintain consistency in how alternative AI providers are supported.",
"testStrategy": "Testing should verify Gemini integration works correctly across all AI services:\n\n1. Unit tests:\n - Test GeminiService class methods with mocked API responses\n - Verify proper error handling for common API errors\n - Test configuration and model selection functionality\n\n2. Integration tests:\n - Verify authentication and API connection with valid credentials\n - Test each AI service with Gemini to ensure proper functionality\n - Compare outputs between Claude and Gemini for the same inputs to verify quality\n\n3. End-to-end tests:\n - Test the complete user flow of switching to Gemini and using various AI features\n - Verify streaming responses work correctly if supported\n\n4. Performance tests:\n - Measure and compare response times between Claude and Gemini\n - Test with various input lengths to verify handling of context limits\n\n5. Manual testing:\n - Verify the quality of Gemini responses across different use cases\n - Test edge cases like very long inputs or specialized domain knowledge\n\nAll tests should pass with Gemini selected as the provider, and the user experience should be consistent regardless of which provider is selected."
},
{
"id": 38,
"title": "Implement Version Check System with Upgrade Notifications",
"description": "Create a system that checks for newer package versions and displays upgrade notifications when users run any command, informing them to update to the latest version.",
"status": "done",
"dependencies": [],
"priority": "high",
"details": "Implement a version check mechanism that runs automatically with every command execution:\n\n1. Create a new module (e.g., `versionChecker.js`) that will:\n - Fetch the latest version from npm registry using the npm registry API (https://registry.npmjs.org/task-master-ai/latest)\n - Compare it with the current installed version (from package.json)\n - Store the last check timestamp to avoid excessive API calls (check once per day)\n - Cache the result to minimize network requests\n\n2. The notification should:\n - Use colored text (e.g., yellow background with black text) to be noticeable\n - Include the current version and latest version\n - Show the exact upgrade command: 'npm i task-master-ai@latest'\n - Be displayed at the beginning or end of command output, not interrupting the main content\n - Include a small separator line to distinguish it from command output\n\n3. Implementation considerations:\n - Handle network failures gracefully (don't block command execution if version check fails)\n - Add a configuration option to disable update checks if needed\n - Ensure the check is lightweight and doesn't significantly impact command performance\n - Consider using a package like 'semver' for proper version comparison\n - Implement a cooldown period (e.g., only check once per day) to avoid excessive API calls\n\n4. The version check should be integrated into the main command execution flow so it runs for all commands automatically.",
"testStrategy": "1. Manual testing:\n - Install an older version of the package\n - Run various commands and verify the update notification appears\n - Update to the latest version and confirm the notification no longer appears\n - Test with network disconnected to ensure graceful handling of failures\n\n2. Unit tests:\n - Mock the npm registry response to test different scenarios:\n - When a newer version exists\n - When using the latest version\n - When the registry is unavailable\n - Test the version comparison logic with various version strings\n - Test the cooldown/caching mechanism works correctly\n\n3. Integration tests:\n - Create a test that runs a command and verifies the notification appears in the expected format\n - Test that the notification appears for all commands\n - Verify the notification doesn't interfere with normal command output\n\n4. Edge cases to test:\n - Pre-release versions (alpha/beta)\n - Very old versions\n - When package.json is missing or malformed\n - When npm registry returns unexpected data"
}
]
}

View File

@@ -0,0 +1,69 @@
import { checkForUpdate, displayUpgradeNotification, compareVersions } from './scripts/modules/commands.js';
import fs from 'fs';
import path from 'path';
// Force our current version for testing
process.env.FORCE_VERSION = '0.9.30';
// Create a mock package.json in memory for testing
const mockPackageJson = {
name: 'task-master-ai',
version: '0.9.30'
};
// Modified version of checkForUpdate that doesn't use HTTP for testing
async function testCheckForUpdate(simulatedLatestVersion) {
// Get current version - use our forced version
const currentVersion = process.env.FORCE_VERSION || '0.9.30';
console.log(`Using simulated current version: ${currentVersion}`);
console.log(`Using simulated latest version: ${simulatedLatestVersion}`);
// Compare versions
const needsUpdate = compareVersions(currentVersion, simulatedLatestVersion) < 0;
return {
currentVersion,
latestVersion: simulatedLatestVersion,
needsUpdate
};
}
// Test with current version older than latest (should show update notice)
async function runTest() {
console.log('=== Testing version check scenarios ===\n');
// Scenario 1: Update available
console.log('\n--- Scenario 1: Update available (Current: 0.9.30, Latest: 1.0.0) ---');
const updateInfo1 = await testCheckForUpdate('1.0.0');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo1.currentVersion}`);
console.log(`- Latest version: ${updateInfo1.latestVersion}`);
console.log(`- Update needed: ${updateInfo1.needsUpdate}`);
if (updateInfo1.needsUpdate) {
console.log('\nDisplaying upgrade notification:');
displayUpgradeNotification(updateInfo1.currentVersion, updateInfo1.latestVersion);
}
// Scenario 2: No update needed (versions equal)
console.log('\n--- Scenario 2: No update needed (Current: 0.9.30, Latest: 0.9.30) ---');
const updateInfo2 = await testCheckForUpdate('0.9.30');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo2.currentVersion}`);
console.log(`- Latest version: ${updateInfo2.latestVersion}`);
console.log(`- Update needed: ${updateInfo2.needsUpdate}`);
// Scenario 3: Development version (current newer than latest)
console.log('\n--- Scenario 3: Development version (Current: 0.9.30, Latest: 0.9.0) ---');
const updateInfo3 = await testCheckForUpdate('0.9.0');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo3.currentVersion}`);
console.log(`- Latest version: ${updateInfo3.latestVersion}`);
console.log(`- Update needed: ${updateInfo3.needsUpdate}`);
console.log('\n=== Test complete ===');
}
// Run all tests
runTest();

test-version-check.js Normal file
View File

@@ -0,0 +1,22 @@
import { displayUpgradeNotification, compareVersions } from './scripts/modules/commands.js';
// Simulate different version scenarios
console.log('=== Simulating version check ===\n');
// 1. Current version is older than latest (should show update notice)
console.log('Scenario 1: Current version older than latest');
displayUpgradeNotification('0.9.30', '1.0.0');
// 2. Current version same as latest (no update needed)
console.log('\nScenario 2: Current version same as latest (this would not normally show a notice)');
console.log('Current: 1.0.0, Latest: 1.0.0');
console.log('compareVersions result:', compareVersions('1.0.0', '1.0.0'));
console.log('Update needed:', compareVersions('1.0.0', '1.0.0') < 0 ? 'Yes' : 'No');
// 3. Current version newer than latest (e.g., development version, would not show notice)
console.log('\nScenario 3: Current version newer than latest (this would not normally show a notice)');
console.log('Current: 1.1.0, Latest: 1.0.0');
console.log('compareVersions result:', compareVersions('1.1.0', '1.0.0'));
console.log('Update needed:', compareVersions('1.1.0', '1.0.0') < 0 ? 'Yes' : 'No');
console.log('\n=== Test complete ===');

View File

@@ -527,3 +527,58 @@ describe('Commands Module', () => {
});
});
});
// Test the version comparison utility
describe('Version comparison', () => {
// Use a dynamic import for the commands module
let compareVersions;
beforeAll(async () => {
// Import the function we want to test dynamically
const commandsModule = await import('../../scripts/modules/commands.js');
compareVersions = commandsModule.compareVersions;
});
test('compareVersions correctly compares semantic versions', () => {
expect(compareVersions('1.0.0', '1.0.0')).toBe(0);
expect(compareVersions('1.0.0', '1.0.1')).toBe(-1);
expect(compareVersions('1.0.1', '1.0.0')).toBe(1);
expect(compareVersions('1.0.0', '1.1.0')).toBe(-1);
expect(compareVersions('1.1.0', '1.0.0')).toBe(1);
expect(compareVersions('1.0.0', '2.0.0')).toBe(-1);
expect(compareVersions('2.0.0', '1.0.0')).toBe(1);
expect(compareVersions('1.0', '1.0.0')).toBe(0);
expect(compareVersions('1.0.0.0', '1.0.0')).toBe(0);
expect(compareVersions('1.0.0', '1.0.0.1')).toBe(-1);
});
});
// Test the update check functionality
describe('Update check', () => {
let displayUpgradeNotification;
let consoleLogSpy;
beforeAll(async () => {
// Import the function we want to test dynamically
const commandsModule = await import('../../scripts/modules/commands.js');
displayUpgradeNotification = commandsModule.displayUpgradeNotification;
});
beforeEach(() => {
// Spy on console.log
consoleLogSpy = jest.spyOn(console, 'log').mockImplementation(() => {});
});
afterEach(() => {
consoleLogSpy.mockRestore();
});
test('displays upgrade notification when newer version is available', () => {
// Test displayUpgradeNotification function
displayUpgradeNotification('1.0.0', '1.1.0');
expect(consoleLogSpy).toHaveBeenCalled();
expect(consoleLogSpy.mock.calls[0][0]).toContain('Update Available!');
expect(consoleLogSpy.mock.calls[0][0]).toContain('1.0.0');
expect(consoleLogSpy.mock.calls[0][0]).toContain('1.1.0');
});
});

View File

@@ -144,3 +144,254 @@ describe('Windsurf Rules File Handling', () => {
);
});
});
// New test suite for MCP Configuration Handling
describe('MCP Configuration Handling', () => {
let tempDir;
beforeEach(() => {
jest.clearAllMocks();
// Create a temporary directory for testing
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'task-master-test-'));
// Spy on fs methods
jest.spyOn(fs, 'writeFileSync').mockImplementation(() => {});
jest.spyOn(fs, 'readFileSync').mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return JSON.stringify({
"mcpServers": {
"existing-server": {
"command": "node",
"args": ["server.js"]
}
}
});
}
return '{}';
});
jest.spyOn(fs, 'existsSync').mockImplementation((filePath) => {
// Return true for specific paths to test different scenarios
if (filePath.toString().includes('package.json')) {
return true;
}
// Default to false for other paths
return false;
});
jest.spyOn(fs, 'mkdirSync').mockImplementation(() => {});
jest.spyOn(fs, 'copyFileSync').mockImplementation(() => {});
});
afterEach(() => {
// Clean up the temporary directory
try {
fs.rmSync(tempDir, { recursive: true, force: true });
} catch (err) {
console.error(`Error cleaning up: ${err.message}`);
}
});
// Test function that simulates the behavior of setupMCPConfiguration
function mockSetupMCPConfiguration(targetDir, projectName) {
const mcpDirPath = path.join(targetDir, '.cursor');
const mcpJsonPath = path.join(mcpDirPath, 'mcp.json');
// Create .cursor directory if it doesn't exist
if (!fs.existsSync(mcpDirPath)) {
fs.mkdirSync(mcpDirPath, { recursive: true });
}
// New MCP config to be added - references the installed package
const newMCPServer = {
"task-master-ai": {
"command": "npx",
"args": [
"task-master-ai",
"mcp-server"
]
}
};
// Check if mcp.json already exists
if (fs.existsSync(mcpJsonPath)) {
try {
// Read existing config
const mcpConfig = JSON.parse(fs.readFileSync(mcpJsonPath, 'utf8'));
// Initialize mcpServers if it doesn't exist
if (!mcpConfig.mcpServers) {
mcpConfig.mcpServers = {};
}
// Add the taskmaster-ai server if it doesn't exist
if (!mcpConfig.mcpServers["task-master-ai"]) {
mcpConfig.mcpServers["task-master-ai"] = newMCPServer["task-master-ai"];
}
// Write the updated configuration
fs.writeFileSync(
mcpJsonPath,
JSON.stringify(mcpConfig, null, 4)
);
} catch (error) {
// Create new configuration on error
const newMCPConfig = {
"mcpServers": newMCPServer
};
fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
}
} else {
// If mcp.json doesn't exist, create it
const newMCPConfig = {
"mcpServers": newMCPServer
};
fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
}
}
test('creates mcp.json when it does not exist', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('task-master-ai')
);
// Should create a proper structure with mcpServers key
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('mcpServers')
);
// Should reference npx command
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('npx')
);
});
test('updates existing mcp.json by adding new server', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Override the existsSync mock to simulate mcp.json exists
fs.existsSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return true;
}
return false;
});
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
// Should preserve existing server
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('existing-server')
);
// Should add our new server
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('task-master-ai')
);
});
test('handles JSON parsing errors by creating new mcp.json', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Override existsSync to say mcp.json exists
fs.existsSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return true;
}
return false;
});
// But make readFileSync return invalid JSON
fs.readFileSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return '{invalid json';
}
return '{}';
});
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
// Should create a new valid JSON file with our server
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('task-master-ai')
);
});
test('does not modify existing server configuration if it already exists', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Override existsSync to say mcp.json exists
fs.existsSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return true;
}
return false;
});
// Return JSON that already has task-master-ai
fs.readFileSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return JSON.stringify({
"mcpServers": {
"existing-server": {
"command": "node",
"args": ["server.js"]
},
"task-master-ai": {
"command": "custom",
"args": ["custom-args"]
}
}
});
}
return '{}';
});
// Spy to check what's written
const writeFileSyncSpy = jest.spyOn(fs, 'writeFileSync');
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
// Verify the written data contains the original taskmaster configuration
const dataWritten = JSON.parse(writeFileSyncSpy.mock.calls[0][1]);
expect(dataWritten.mcpServers["task-master-ai"].command).toBe("custom");
expect(dataWritten.mcpServers["task-master-ai"].args).toContain("custom-args");
});
test("creates the .cursor directory if it doesn't exist", () => {
// Arrange
const cursorDirPath = path.join(tempDir, '.cursor');
// Make sure it looks like the directory doesn't exist
fs.existsSync.mockReturnValue(false);
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
expect(fs.mkdirSync).toHaveBeenCalledWith(cursorDirPath, { recursive: true });
});
});