Merge crunchyman/feat.add.mcp.2 into next

Eyal Toledano
2025-03-29 17:26:04 -04:00
28 changed files with 28053 additions and 191 deletions

.cursor/mcp.json (new file, +10 lines)

@@ -0,0 +1,10 @@
{
"mcpServers": {
"taskmaster-ai": {
"command": "node",
"args": [
"./mcp-server/server.js"
]
}
}
}


@@ -52,6 +52,28 @@ alwaysApply: false
> **Note**: Although options are defined with kebab-case (`--num-tasks`), Commander.js stores them internally as camelCase properties. Access them in code as `options.numTasks`, not `options['num-tasks']`.
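For example (a minimal sketch; the registration shown is illustrative):
```javascript
// Defined in kebab-case...
programInstance
  .command('parse-prd')
  .option('--num-tasks <number>', 'Number of tasks to generate', '10')
  .action(async (options) => {
    // ...but accessed in camelCase:
    const numTasks = parseInt(options.numTasks, 10);
  });
```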
- **Boolean Flag Conventions**:
- ✅ DO: Use positive flags with `--skip-` prefix for disabling behavior
- ❌ DON'T: Use negated boolean flags with `--no-` prefix
- ✅ DO: Use consistent flag handling across all commands
```javascript
// ✅ DO: Use positive flag with skip- prefix
.option('--skip-generate', 'Skip generating task files')
// ❌ DON'T: Use --no- prefix
.option('--no-generate', 'Skip generating task files')
```
> **Important**: When handling boolean flags in the code, make your intent clear:
```javascript
// ✅ DO: Use clear variable naming that matches the flag's intent
const generateFiles = !options.skipGenerate;
// ❌ DON'T: Use confusing double negatives
const dontSkipGenerate = !options.skipGenerate;
```
## Input Validation
- **Required Parameters**:
@@ -80,6 +102,38 @@ alwaysApply: false
}
```
- **Enhanced Input Validation**:
- ✅ DO: Validate file existence for critical file operations
- ✅ DO: Provide context-specific validation for identifiers
- ✅ DO: Check required API keys for features that depend on them
```javascript
// ✅ DO: Validate file existence
if (!fs.existsSync(tasksPath)) {
console.error(chalk.red(`Error: Tasks file not found at path: ${tasksPath}`));
if (tasksPath === 'tasks/tasks.json') {
console.log(chalk.yellow('Hint: Run task-master init or task-master parse-prd to create tasks.json first'));
} else {
console.log(chalk.yellow(`Hint: Check if the file path is correct: ${tasksPath}`));
}
process.exit(1);
}
// ✅ DO: Validate task ID
const taskId = parseInt(options.id, 10);
if (isNaN(taskId) || taskId <= 0) {
console.error(chalk.red(`Error: Invalid task ID: ${options.id}. Task ID must be a positive integer.`));
console.log(chalk.yellow('Usage example: task-master update-task --id=23 --prompt="Update with new information"'));
process.exit(1);
}
// ✅ DO: Check for required API keys
if (useResearch && !process.env.PERPLEXITY_API_KEY) {
console.log(chalk.yellow('Warning: PERPLEXITY_API_KEY environment variable is missing. Research-backed updates will not be available.'));
console.log(chalk.yellow('Falling back to Claude AI for task update.'));
}
```
## User Feedback
- **Operation Status**:
@@ -101,6 +155,26 @@ alwaysApply: false
}
```
- **Success Messages with Next Steps**:
- ✅ DO: Use boxen for important success messages with clear formatting
- ✅ DO: Provide suggested next steps after command completion
- ✅ DO: Include ready-to-use commands for follow-up actions
```javascript
// ✅ DO: Display success with next steps
console.log(boxen(
chalk.white.bold(`Subtask ${parentId}.${subtask.id} Added Successfully`) + '\n\n' +
chalk.white(`Title: ${subtask.title}`) + '\n' +
chalk.white(`Status: ${getStatusWithColor(subtask.status)}`) + '\n' +
(dependencies.length > 0 ? chalk.white(`Dependencies: ${dependencies.join(', ')}`) + '\n' : '') +
'\n' +
chalk.white.bold('Next Steps:') + '\n' +
chalk.cyan(`1. Run ${chalk.yellow(`task-master show ${parentId}`)} to see the parent task with all subtasks`) + '\n' +
chalk.cyan(`2. Run ${chalk.yellow(`task-master set-status --id=${parentId}.${subtask.id} --status=in-progress`)} to start working on it`),
{ padding: 1, borderColor: 'green', borderStyle: 'round', margin: { top: 1 } }
));
```
## Command Registration
- **Command Grouping**:
@@ -117,7 +191,10 @@ alwaysApply: false
export {
registerCommands,
setupCLI,
runCLI
runCLI,
checkForUpdate, // Include version checking functions
compareVersions,
displayUpgradeNotification
};
```
@@ -143,6 +220,88 @@ alwaysApply: false
}
```
- **Unknown Options Handling**:
- ✅ DO: Provide clear error messages for unknown options
- ✅ DO: Show available options when an unknown option is used
- ✅ DO: Include command-specific help displays for common errors
- ❌ DON'T: Allow unknown options with `.allowUnknownOption()`
```javascript
// ✅ DO: Register global error handlers for unknown options
programInstance.on('option:unknown', function(unknownOption) {
const commandName = this._name || 'unknown';
console.error(chalk.red(`Error: Unknown option '${unknownOption}'`));
console.error(chalk.yellow(`Run 'task-master ${commandName} --help' to see available options`));
process.exit(1);
});
// ✅ DO: Add command-specific help displays
function showCommandHelp() {
console.log(boxen(
chalk.white.bold('Command Help') + '\n\n' +
chalk.cyan('Usage:') + '\n' +
` task-master command --option1=<value> [options]\n\n` +
chalk.cyan('Options:') + '\n' +
' --option1 <value> Description of option1 (required)\n' +
' --option2 <value> Description of option2\n\n' +
chalk.cyan('Examples:') + '\n' +
' task-master command --option1=value --option2=value',
{ padding: 1, borderColor: 'blue', borderStyle: 'round' }
));
}
```
- **Global Error Handling**:
- ✅ DO: Set up global error handlers for uncaught exceptions
- ✅ DO: Detect and format Commander-specific errors
- ✅ DO: Provide suitable guidance for fixing common errors
```javascript
// ✅ DO: Set up global error handlers with helpful messages
process.on('uncaughtException', (err) => {
// Handle Commander-specific errors
if (err.code === 'commander.unknownOption') {
const option = err.message.match(/'([^']+)'/)?.[1];
console.error(chalk.red(`Error: Unknown option '${option}'`));
console.error(chalk.yellow(`Run 'task-master <command> --help' to see available options`));
process.exit(1);
}
// Handle other error types...
console.error(chalk.red(`Error: ${err.message}`));
process.exit(1);
});
```
- **Contextual Error Handling**:
- ✅ DO: Provide specific error handling for common issues
- ✅ DO: Include troubleshooting hints for each error type
- ✅ DO: Use consistent error formatting across all commands
```javascript
// ✅ DO: Provide specific error handling with guidance
try {
// Implementation
} catch (error) {
console.error(chalk.red(`Error: ${error.message}`));
// Provide more helpful error messages for common issues
if (error.message.includes('task') && error.message.includes('not found')) {
console.log(chalk.yellow('\nTo fix this issue:'));
console.log(' 1. Run task-master list to see all available task IDs');
console.log(' 2. Use a valid task ID with the --id parameter');
} else if (error.message.includes('API key')) {
console.log(chalk.yellow('\nThis error is related to API keys. Check your environment variables.'));
}
if (CONFIG.debug) {
console.error(error);
}
process.exit(1);
}
```
## Integration with Other Modules
- **Import Organization**:
@@ -155,6 +314,7 @@ alwaysApply: false
import { program } from 'commander';
import path from 'path';
import chalk from 'chalk';
import https from 'https';
import { CONFIG, log, readJSON } from './utils.js';
import { displayBanner, displayHelp } from './ui.js';
@@ -172,30 +332,22 @@ alwaysApply: false
.description('Add a new subtask to a parent task or convert an existing task to a subtask')
.option('-f, --file <path>', 'Path to the tasks file', 'tasks/tasks.json')
.option('-p, --parent <id>', 'ID of the parent task (required)')
.option('-e, --existing <id>', 'ID of an existing task to convert to a subtask')
.option('-i, --task-id <id>', 'Existing task ID to convert to subtask')
.option('-t, --title <title>', 'Title for the new subtask (when not converting)')
.option('-d, --description <description>', 'Description for the new subtask (when not converting)')
.option('--details <details>', 'Implementation details for the new subtask (when not converting)')
.option('--dependencies <ids>', 'Comma-separated list of subtask IDs this subtask depends on')
.option('--status <status>', 'Initial status for the subtask', 'pending')
.option('--skip-generate', 'Skip regenerating task files')
.action(async (options) => {
// Validate required parameters
if (!options.parent) {
console.error(chalk.red('Error: --parent parameter is required'));
showAddSubtaskHelp(); // Show contextual help
process.exit(1);
}
// Validate that either existing task ID or title is provided
if (!options.existing && !options.title) {
console.error(chalk.red('Error: Either --existing or --title must be provided'));
process.exit(1);
}
try {
// Implementation
} catch (error) {
// Error handling
}
// Implementation with detailed error handling
});
```
@@ -208,25 +360,75 @@ alwaysApply: false
.option('-f, --file <path>', 'Path to the tasks file', 'tasks/tasks.json')
.option('-i, --id <id>', 'ID of the subtask to remove in format "parentId.subtaskId" (required)')
.option('-c, --convert', 'Convert the subtask to a standalone task')
.option('--skip-generate', 'Skip regenerating task files')
.action(async (options) => {
// Validate required parameters
if (!options.id) {
console.error(chalk.red('Error: --id parameter is required'));
process.exit(1);
}
// Validate subtask ID format
if (!options.id.includes('.')) {
console.error(chalk.red('Error: Subtask ID must be in format "parentId.subtaskId"'));
process.exit(1);
}
try {
// Implementation
} catch (error) {
// Error handling
}
// Implementation with detailed error handling
})
.on('error', function(err) {
console.error(chalk.red(`Error: ${err.message}`));
showRemoveSubtaskHelp(); // Show contextual help
process.exit(1);
});
```
## Version Checking and Updates
- **Automatic Version Checking**:
- ✅ DO: Implement version checking to notify users of available updates
- ✅ DO: Use non-blocking version checks that don't delay command execution
- ✅ DO: Display update notifications after command completion
```javascript
// ✅ DO: Implement version checking function
async function checkForUpdate() {
// Implementation details...
return { currentVersion, latestVersion, needsUpdate };
}
// ✅ DO: Implement semantic version comparison
function compareVersions(v1, v2) {
const v1Parts = v1.split('.').map(p => parseInt(p, 10));
const v2Parts = v2.split('.').map(p => parseInt(p, 10));
for (let i = 0; i < Math.max(v1Parts.length, v2Parts.length); i++) {
const v1Part = v1Parts[i] || 0;
const v2Part = v2Parts[i] || 0;
if (v1Part < v2Part) return -1;
if (v1Part > v2Part) return 1;
}
return 0; // -1, 0, or 1
}
// ✅ DO: Display attractive update notifications
function displayUpgradeNotification(currentVersion, latestVersion) {
const message = boxen(
`${chalk.blue.bold('Update Available!')} ${chalk.dim(currentVersion)} → ${chalk.green(latestVersion)}\n\n` +
`Run ${chalk.cyan('npm i task-master-ai@latest -g')} to update to the latest version with new features and bug fixes.`,
{
padding: 1,
margin: { top: 1, bottom: 1 },
borderColor: 'yellow',
borderStyle: 'round'
}
);
console.log(message);
}
// ✅ DO: Integrate version checking in CLI run function
async function runCLI(argv = process.argv) {
try {
// Start the update check in the background - don't await yet
const updateCheckPromise = checkForUpdate();
// Setup and parse
const programInstance = setupCLI();
await programInstance.parseAsync(argv);
// After command execution, check if an update is available
const updateInfo = await updateCheckPromise;
if (updateInfo.needsUpdate) {
displayUpgradeNotification(updateInfo.currentVersion, updateInfo.latestVersion);
}
} catch (error) {
// Error handling...
}
}
```
Refer to [`commands.js`](mdc:scripts/modules/commands.js) for implementation examples and [`new_features.mdc`](mdc:.cursor/rules/new_features.mdc) for integration guidelines.


@@ -13,6 +13,7 @@ import { Command } from 'commander';
import { displayHelp, displayBanner } from '../scripts/modules/ui.js';
import { registerCommands } from '../scripts/modules/commands.js';
import { detectCamelCaseFlags } from '../scripts/modules/utils.js';
import chalk from 'chalk';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
@@ -167,7 +168,7 @@ function createDevScriptAction(commandName) {
if (value === true) {
args.push(`--${kebabKey}`);
} else if (value === false && key === 'generate') {
args.push('--no-generate');
args.push('--skip-generate');
}
} else {
// Always use kebab-case for option names
@@ -253,7 +254,6 @@ registerInitCommand(program);
program
.command('dev')
.description('Run the dev.js script')
.allowUnknownOption(true)
.action(() => {
const args = process.argv.slice(process.argv.indexOf('dev') + 1);
runDevScript(args);
@@ -273,8 +273,7 @@ tempProgram.commands.forEach(cmd => {
// Create a new command with the same name and description
const newCmd = program
.command(cmd.name())
.description(cmd.description())
.allowUnknownOption(); // Allow any options, including camelCase ones
.description(cmd.description());
// Copy all options
cmd.options.forEach(opt => {
@@ -292,6 +291,39 @@ tempProgram.commands.forEach(cmd => {
// Parse the command line arguments
program.parse(process.argv);
// Add global error handling for unknown commands and options
process.on('uncaughtException', (err) => {
// Check if this is a commander.js unknown option error
if (err.code === 'commander.unknownOption') {
const option = err.message.match(/'([^']+)'/)?.[1];
const commandArg = process.argv.find(arg => !arg.startsWith('-') &&
arg !== 'task-master' &&
!arg.includes('/') &&
arg !== 'node');
const command = commandArg || 'unknown';
console.error(chalk.red(`Error: Unknown option '${option}'`));
console.error(chalk.yellow(`Run 'task-master ${command} --help' to see available options for this command`));
process.exit(1);
}
// Check if this is a commander.js unknown command error
if (err.code === 'commander.unknownCommand') {
const command = err.message.match(/'([^']+)'/)?.[1];
console.error(chalk.red(`Error: Unknown command '${command}'`));
console.error(chalk.yellow(`Run 'task-master --help' to see available commands`));
process.exit(1);
}
// Handle other uncaught exceptions
console.error(chalk.red(`Error: ${err.message}`));
if (process.env.DEBUG === '1') {
console.error(err);
}
process.exit(1);
});
// Show help if no command was provided (just 'task-master' with no args)
if (process.argv.length <= 2) {
displayBanner();

docs/MCP_INTEGRATION.md (new file, +269 lines)

@@ -0,0 +1,269 @@
# Task Master MCP Integration
This document outlines how Task Master CLI functionality is integrated with the MCP (Model Context Protocol) architecture to provide both CLI and programmatic API access to features.
## Architecture Overview
The MCP integration uses a layered approach:
1. **Core Functions** - In `scripts/modules/` contain the main business logic
2. **Source Parameter** - Core functions check the `source` parameter to determine behavior
3. **Task Master Core** - In `mcp-server/src/core/task-master-core.js` provides direct function imports
4. **MCP Tools** - In `mcp-server/src/tools/` register the functions with the MCP server
```
┌─────────────────┐ ┌─────────────────┐
│ CLI User │ │ MCP User │
└────────┬────────┘ └────────┬────────┘
│ │
▼ ▼
┌────────────────┐ ┌────────────────────┐
│ commands.js │ │ MCP Tool API │
└────────┬───────┘ └──────────┬─────────┘
│ │
│ │
▼ ▼
┌───────────────────────────────────────────────┐
│ │
│ Core Modules (task-manager.js, etc.) │
│ │
└───────────────────────────────────────────────┘
```
## Core Function Pattern
Core functions should follow this pattern to support both CLI and MCP use:
```javascript
/**
* Example function with source parameter support
* @param {Object} options - Additional options including source
* @returns {Object|undefined} - Returns data when source is 'mcp'
*/
function exampleFunction(param1, param2, options = {}) {
try {
// Skip UI for MCP
if (options.source !== 'mcp') {
displayBanner();
console.log(chalk.blue('Processing operation...'));
}
// Do the core business logic
const result = doSomething(param1, param2);
// For MCP, return structured data
if (options.source === 'mcp') {
return {
success: true,
data: result
};
}
// For CLI, display output
console.log(chalk.green('Operation completed successfully!'));
} catch (error) {
// Handle errors based on source
if (options.source === 'mcp') {
return {
success: false,
error: error.message
};
}
// CLI error handling
console.error(chalk.red(`Error: ${error.message}`));
process.exit(1);
}
}
```
## Source-Adapter Utilities
For convenience, you can use the source adapter helpers in `scripts/modules/source-adapter.js`:
```javascript
import { adaptForMcp, sourceSplitFunction } from './source-adapter.js';
// Simple adaptation - just adds source parameter support
export const simpleFunction = adaptForMcp(originalFunction);
// Split implementation - completely different code paths for CLI vs MCP
export const complexFunction = sourceSplitFunction(
// CLI version with UI
function(param1, param2) {
displayBanner();
console.log(`Processing ${param1}...`);
// ... CLI implementation
},
// MCP version with structured return
function(param1, param2, options = {}) {
// ... MCP implementation
return { success: true, data };
}
);
```
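Callers then pick the code path with the `source` option (a sketch; the argument values are illustrative):
```javascript
// CLI path: displays the banner and progress output
complexFunction('tasks/tasks.json', 42);

// MCP path: skips the UI and returns a structured result
const result = complexFunction('tasks/tasks.json', 42, { source: 'mcp' });
```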
## Adding New Features
When adding new features, follow these steps to ensure CLI and MCP compatibility:
1. **Implement Core Logic** in the appropriate module file
2. **Add Source Parameter Support** using the pattern above
3. **Add to task-master-core.js** to make it available for direct import
4. **Update Command Map** in `mcp-server/src/tools/utils.js`
5. **Create Tool Implementation** in `mcp-server/src/tools/`
6. **Register the Tool** in `mcp-server/src/tools/index.js`
### Core Function Implementation
```javascript
// In scripts/modules/task-manager.js
export async function newFeature(param1, param2, options = {}) {
try {
// Source-specific UI
if (options.source !== 'mcp') {
displayBanner();
console.log(chalk.blue('Running new feature...'));
}
// Shared core logic
const result = processFeature(param1, param2);
// Source-specific return handling
if (options.source === 'mcp') {
return {
success: true,
data: result
};
}
// CLI output
console.log(chalk.green('Feature completed successfully!'));
displayOutput(result);
} catch (error) {
// Error handling based on source
if (options.source === 'mcp') {
return {
success: false,
error: error.message
};
}
console.error(chalk.red(`Error: ${error.message}`));
process.exit(1);
}
}
```
### Task Master Core Update
```javascript
// In mcp-server/src/core/task-master-core.js
import { newFeature } from '../../../scripts/modules/task-manager.js';
// Add to exports
export default {
// ... existing functions
async newFeature(args = {}, options = {}) {
const { param1, param2 } = args;
return executeFunction(newFeature, [param1, param2], options);
}
};
```
### Command Map Update
```javascript
// In mcp-server/src/tools/utils.js
const commandMap = {
// ... existing mappings
'new-feature': 'newFeature'
};
```
### Tool Implementation
```javascript
// In mcp-server/src/tools/newFeature.js
import { z } from "zod";
import {
executeTaskMasterCommand,
createContentResponse,
createErrorResponse,
} from "./utils.js";
export function registerNewFeatureTool(server) {
server.addTool({
name: "newFeature",
description: "Run the new feature",
parameters: z.object({
param1: z.string().describe("First parameter"),
param2: z.number().optional().describe("Second parameter"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z.string().describe("Root directory of the project")
}),
execute: async (args, { log }) => {
try {
log.info(`Running new feature with args: ${JSON.stringify(args)}`);
const cmdArgs = [];
if (args.param1) cmdArgs.push(`--param1=${args.param1}`);
if (args.param2) cmdArgs.push(`--param2=${args.param2}`);
if (args.file) cmdArgs.push(`--file=${args.file}`);
const projectRoot = args.projectRoot;
// Execute the command
const result = await executeTaskMasterCommand(
"new-feature",
log,
cmdArgs,
projectRoot
);
if (!result.success) {
throw new Error(result.error);
}
return createContentResponse(result.stdout);
} catch (error) {
log.error(`Error in new feature: ${error.message}`);
return createErrorResponse(`Error in new feature: ${error.message}`);
}
},
});
}
```
### Tool Registration
```javascript
// In mcp-server/src/tools/index.js
import { registerNewFeatureTool } from "./newFeature.js";
export function registerTaskMasterTools(server) {
// ... existing registrations
registerNewFeatureTool(server);
}
```
## Testing
Always test your MCP-compatible features with both CLI and MCP interfaces:
```bash
# Test CLI usage
node scripts/dev.js new-feature --param1=test --param2=123
# Test MCP usage
node mcp-server/tests/test-command.js newFeature
```
## Best Practices
1. **Keep Core Logic DRY** - Share as much logic as possible between CLI and MCP
2. **Structured Data for MCP** - Return clean JSON objects from MCP source functions
3. **Consistent Error Handling** - Standardize error formats for both interfaces
4. **Documentation** - Update MCP tool documentation when adding new features
5. **Testing** - Test both CLI and MCP interfaces for any new or modified feature
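One way to keep response shapes consistent is a small wrapper around core calls (a minimal sketch; `toMcpResult` is illustrative and not part of the codebase):
```javascript
// Wraps any core function so MCP callers always receive { success, data | error }
async function toMcpResult(fn, ...args) {
  try {
    const data = await fn(...args, { source: 'mcp' });
    return { success: true, data };
  } catch (error) {
    return { success: false, error: error.message };
  }
}
```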

docs/fastmcp-docs.txt (new file, +3849 lines; diff suppressed because it is too large)

docs/mcp-js-sdk-docs.txt (new file, +14618 lines; diff suppressed because it is too large)

docs/mcp-protocol-docs.txt (new file, +6649 lines; diff suppressed because it is too large)


@@ -17,7 +17,8 @@ export default {
// The glob patterns Jest uses to detect test files
testMatch: [
'**/__tests__/**/*.js',
'**/?(*.)+(spec|test).js'
'**/?(*.)+(spec|test).js',
'**/tests/*.test.js'
],
// Transform files


@@ -30,9 +30,9 @@ class TaskMasterMCPServer {
this.server = new FastMCP(this.options);
this.initialized = false;
this.server.addResource({});
// this.server.addResource({});
this.server.addResourceTemplate({});
// this.server.addResourceTemplate({});
// Bind methods
this.init = this.init.bind(this);


@@ -45,8 +45,6 @@ export function registerAddTaskTool(server) {
if (args.priority) cmdArgs.push(`--priority=${args.priority}`);
if (args.file) cmdArgs.push(`--file=${args.file}`);
const projectRoot = args.projectRoot;
const result = executeTaskMasterCommand(
"add-task",
log,

package-lock.json (generated, 175 lines changed)

@@ -73,26 +73,6 @@
"node-fetch": "^2.6.7"
}
},
"node_modules/@anthropic-ai/sdk/node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"license": "MIT",
"dependencies": {
"whatwg-url": "^5.0.0"
},
"engines": {
"node": "4.x || >=6.0.0"
},
"peerDependencies": {
"encoding": "^0.1.0"
},
"peerDependenciesMeta": {
"encoding": {
"optional": true
}
}
},
"node_modules/@babel/code-frame": {
"version": "7.26.2",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz",
@@ -819,27 +799,6 @@
"node-fetch": "^2.5.0"
}
},
"node_modules/@changesets/get-github-info/node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"dev": true,
"license": "MIT",
"dependencies": {
"whatwg-url": "^5.0.0"
},
"engines": {
"node": "4.x || >=6.0.0"
},
"peerDependencies": {
"encoding": "^0.1.0"
},
"peerDependenciesMeta": {
"encoding": {
"optional": true
}
}
},
"node_modules/@changesets/get-release-plan": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/@changesets/get-release-plan/-/get-release-plan-4.0.8.tgz",
@@ -3014,6 +2973,15 @@
"node": ">= 8"
}
},
"node_modules/data-uri-to-buffer": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz",
"integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==",
"license": "MIT",
"engines": {
"node": ">= 12"
}
},
"node_modules/dataloader": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/dataloader/-/dataloader-1.4.0.tgz",
@@ -3753,6 +3721,38 @@
"bser": "2.1.1"
}
},
"node_modules/fetch-blob": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz",
"integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/jimmywarting"
},
{
"type": "paypal",
"url": "https://paypal.me/jimmywarting"
}
],
"license": "MIT",
"dependencies": {
"node-domexception": "^1.0.0",
"web-streams-polyfill": "^3.0.3"
},
"engines": {
"node": "^12.20 || >= 14.13"
}
},
"node_modules/fetch-blob/node_modules/web-streams-polyfill": {
"version": "3.3.3",
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz",
"integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==",
"license": "MIT",
"engines": {
"node": ">= 8"
}
},
"node_modules/fflate": {
"version": "0.8.2",
"resolved": "https://registry.npmjs.org/fflate/-/fflate-0.8.2.tgz",
@@ -3898,6 +3898,18 @@
"node": ">= 12.20"
}
},
"node_modules/formdata-polyfill": {
"version": "4.0.10",
"resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz",
"integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==",
"license": "MIT",
"dependencies": {
"fetch-blob": "^3.1.2"
},
"engines": {
"node": ">=12.20.0"
}
},
"node_modules/formidable": {
"version": "3.5.2",
"resolved": "https://registry.npmjs.org/formidable/-/formidable-3.5.2.tgz",
@@ -5752,6 +5764,24 @@
"node": ">=10.5.0"
}
},
"node_modules/node-fetch": {
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz",
"integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==",
"license": "MIT",
"dependencies": {
"data-uri-to-buffer": "^4.0.0",
"fetch-blob": "^3.1.4",
"formdata-polyfill": "^4.0.10"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/node-fetch"
}
},
"node_modules/node-int64": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz",
@@ -5876,26 +5906,6 @@
}
}
},
"node_modules/openai/node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"license": "MIT",
"dependencies": {
"whatwg-url": "^5.0.0"
},
"engines": {
"node": "4.x || >=6.0.0"
},
"peerDependencies": {
"encoding": "^0.1.0"
},
"peerDependenciesMeta": {
"encoding": {
"optional": true
}
}
},
"node_modules/ora": {
"version": "8.2.0",
"resolved": "https://registry.npmjs.org/ora/-/ora-8.2.0.tgz",
@@ -6289,15 +6299,6 @@
"node": ">= 0.10"
}
},
"node_modules/punycode": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
"integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==",
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/pure-rand": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz",
@@ -7171,18 +7172,6 @@
"url": "https://github.com/sponsors/Borewit"
}
},
"node_modules/tr46": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-3.0.0.tgz",
"integrity": "sha512-l7FvfAHlcmulp8kr+flpQZmVwtu7nfRV7NZujtN0OqES8EL4O4e0qqzL0DC5gAvx/ZC/9lk6rhcUwYvkBnBnYA==",
"license": "MIT",
"dependencies": {
"punycode": "^2.1.1"
},
"engines": {
"node": ">=12"
}
},
"node_modules/type-detect": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
@@ -7365,28 +7354,6 @@
"node": ">= 14"
}
},
"node_modules/webidl-conversions": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz",
"integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==",
"license": "BSD-2-Clause",
"engines": {
"node": ">=12"
}
},
"node_modules/whatwg-url": {
"version": "11.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-11.0.0.tgz",
"integrity": "sha512-RKT8HExMpoYx4igMiVMY83lN6UeITKJlBQ+vR/8ZJ8OCdSiN3RwCq+9gH0+Xzj0+5IrM6i4j/6LuvzbZIQgEcQ==",
"license": "MIT",
"dependencies": {
"tr46": "^3.0.0",
"webidl-conversions": "^7.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",


@@ -7,7 +7,7 @@
"bin": {
"task-master": "bin/task-master.js",
"task-master-init": "bin/task-master-init.js",
"task-master-mcp": "mcp-server/server.js"
"task-master-mcp-server": "mcp-server/server.js"
},
"scripts": {
"test": "node --experimental-vm-modules node_modules/.bin/jest",
@@ -74,7 +74,7 @@
"mcp-server/**"
],
"overrides": {
"node-fetch": "^2.6.7",
"node-fetch": "^3.3.2",
"whatwg-url": "^11.0.0"
},
"devDependencies": {


@@ -94,6 +94,9 @@ node scripts/dev.js update --from=4 --prompt="Refactor tasks from ID 4 onward to
# Update all tasks (default from=1)
node scripts/dev.js update --prompt="Add authentication to all relevant tasks"
# With research-backed updates using Perplexity AI
node scripts/dev.js update --from=4 --prompt="Integrate OAuth 2.0" --research
# Specify a different tasks file
node scripts/dev.js update --file=custom-tasks.json --from=5 --prompt="Change database from MongoDB to PostgreSQL"
```
@@ -102,6 +105,27 @@ Notes:
- The `--prompt` parameter is required and should explain the changes or new context
- Only tasks that aren't marked as 'done' will be updated
- Tasks with ID >= the specified --from value will be updated
- The `--research` flag uses Perplexity AI for more informed updates when available
## Updating a Single Task
The `update-task` command allows you to update a specific task instead of multiple tasks:
```bash
# Update a specific task with new information
node scripts/dev.js update-task --id=4 --prompt="Use JWT for authentication"
# With research-backed updates using Perplexity AI
node scripts/dev.js update-task --id=4 --prompt="Use JWT for authentication" --research
```
This command:
- Updates only the specified task rather than a range of tasks
- Provides detailed validation with helpful error messages
- Checks for required API keys when using research mode
- Falls back gracefully if Perplexity API is unavailable
- Preserves tasks that are already marked as "done"
- Includes contextual error handling for common issues
## Setting Task Status
@@ -426,4 +450,95 @@ This command:
- Commands for working with subtasks
- For subtasks, provides a link to view the parent task
This command is particularly useful when you need to examine a specific task in detail before implementing it or when you want to check the status and details of a particular task.
## Enhanced Error Handling
The script now includes improved error handling throughout all commands (see the examples below):
1. **Detailed Validation**:
- Required parameters (like task IDs and prompts) are validated early
- File existence is checked with customized errors for common scenarios
- Parameter type conversion is handled with clear error messages
2. **Contextual Error Messages**:
- Task not found errors include suggestions to run the list command
- API key errors include reminders to check environment variables
- Invalid ID format errors show the expected format
3. **Command-Specific Help Displays**:
- When validation fails, detailed help for the specific command is shown
- Help displays include usage examples and parameter descriptions
- Formatted in clear, color-coded boxes with examples
4. **Helpful Error Recovery**:
- Detailed troubleshooting steps for common errors
- Graceful fallbacks for missing optional dependencies
- Clear instructions for how to fix configuration issues
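For example, each of the following invocations exercises one of the validation paths above (a sketch; exact messages may differ):
```bash
# Missing required --id: fails early and prints a usage example
node scripts/dev.js update-task --prompt="Use JWT for authentication"

# Non-numeric task ID: fails and shows the expected format
node scripts/dev.js update-task --id=abc --prompt="Use JWT for authentication"

# Nonexistent custom tasks file: fails with a hint to check the file path
node scripts/dev.js update-task --id=4 --prompt="Use JWT" --file=missing.json
```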
## Version Checking
The script now automatically checks for updates without slowing down execution:
1. **Background Version Checking**:
- Non-blocking version checks run in the background while commands execute
- Actual command execution isn't delayed by version checking
- Update notifications appear after command completion
2. **Update Notifications**:
- When a newer version is available, a notification is displayed
- Notifications include current version, latest version, and update command
- Formatted in an attention-grabbing box with clear instructions
3. **Implementation Details**:
- Uses semantic versioning to compare current and latest versions (see the sketch below)
- Fetches version information from npm registry with a timeout
- Gracefully handles connection issues without affecting command execution
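Because versions are compared per numeric part, results match semantic-version expectations even where plain string comparison would not (an illustrative sketch using the `compareVersions` helper from `commands.js`):
```javascript
compareVersions('0.9.9', '0.10.0'); // -1: 0.10.0 is newer, since 10 > 9 numerically
compareVersions('1.2', '1.2.0');    //  0: missing parts are treated as 0
'0.9.9' < '0.10.0';                 // false: naive string comparison gets this wrong
```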
## Subtask Management
The script now includes enhanced commands for managing subtasks:
### Adding Subtasks
```bash
# Add a subtask to an existing task
node scripts/dev.js add-subtask --parent=5 --title="Implement login UI" --description="Create login form"
# Convert an existing task to a subtask
node scripts/dev.js add-subtask --parent=5 --task-id=8
# Add a subtask with dependencies
node scripts/dev.js add-subtask --parent=5 --title="Authentication middleware" --dependencies=5.1,5.2
# Skip regenerating task files
node scripts/dev.js add-subtask --parent=5 --title="Login API route" --skip-generate
```
Key features:
- Create new subtasks with detailed properties or convert existing tasks
- Define dependencies between subtasks
- Set custom status for new subtasks
- Provides next-step suggestions after creation
### Removing Subtasks
```bash
# Remove a subtask
node scripts/dev.js remove-subtask --id=5.2
# Remove multiple subtasks
node scripts/dev.js remove-subtask --id=5.2,5.3,5.4
# Convert a subtask to a standalone task
node scripts/dev.js remove-subtask --id=5.2 --convert
# Skip regenerating task files
node scripts/dev.js remove-subtask --id=5.2 --skip-generate
```
Key features:
- Remove subtasks individually or in batches
- Optionally convert subtasks to standalone tasks
- Control whether task files are regenerated
- Provides detailed success messages and next steps


@@ -39,6 +39,7 @@ program
.option('-a, --author <author>', 'Author name')
.option('--skip-install', 'Skip installing dependencies')
.option('--dry-run', 'Show what would be done without making changes')
.option('--aliases', 'Add shell aliases (tm, taskmaster)')
.parse(process.argv);
const options = program.opts();
@@ -133,6 +134,53 @@ function ensureDirectoryExists(dirPath) {
}
}
// Function to add shell aliases to the user's shell configuration
function addShellAliases() {
const homeDir = process.env.HOME || process.env.USERPROFILE;
let shellConfigFile;
// Determine which shell config file to use
if (process.env.SHELL?.includes('zsh')) {
shellConfigFile = path.join(homeDir, '.zshrc');
} else if (process.env.SHELL?.includes('bash')) {
shellConfigFile = path.join(homeDir, '.bashrc');
} else {
log('warn', 'Could not determine shell type. Aliases not added.');
return false;
}
try {
// Check if file exists
if (!fs.existsSync(shellConfigFile)) {
log('warn', `Shell config file ${shellConfigFile} not found. Aliases not added.`);
return false;
}
// Check if aliases already exist
const configContent = fs.readFileSync(shellConfigFile, 'utf8');
if (configContent.includes('alias tm=\'task-master\'')) {
log('info', 'Task Master aliases already exist in shell config.');
return true;
}
// Add aliases to the shell config file
const aliasBlock = `
# Task Master aliases added on ${new Date().toLocaleDateString()}
alias tm='task-master'
alias taskmaster='task-master'
`;
fs.appendFileSync(shellConfigFile, aliasBlock);
log('success', `Added Task Master aliases to ${shellConfigFile}`);
log('info', 'To use the aliases in your current terminal, run: source ' + shellConfigFile);
return true;
} catch (error) {
log('error', `Failed to add aliases: ${error.message}`);
return false;
}
}
// Function to copy a file from the package to the target directory
function copyTemplateFile(templateName, targetPath, replacements = {}) {
// Get the file content from the appropriate source directory
@@ -299,6 +347,7 @@ async function initializeProject(options = {}) {
const authorName = options.authorName || '';
const dryRun = options.dryRun || false;
const skipInstall = options.skipInstall || false;
const addAliases = options.addAliases || false;
if (dryRun) {
log('info', 'DRY RUN MODE: No files will be modified');
@@ -306,6 +355,9 @@ async function initializeProject(options = {}) {
log('info', `Description: ${projectDescription}`);
log('info', `Author: ${authorName || 'Not specified'}`);
log('info', 'Would create/update necessary project files');
if (addAliases) {
log('info', 'Would add shell aliases for task-master');
}
if (!skipInstall) {
log('info', 'Would install dependencies');
}
@@ -318,7 +370,7 @@ async function initializeProject(options = {}) {
};
}
createProjectStructure(projectName, projectDescription, projectVersion, authorName, skipInstall);
createProjectStructure(projectName, projectDescription, projectVersion, authorName, skipInstall, addAliases);
return {
projectName,
projectDescription,
@@ -340,6 +392,10 @@ async function initializeProject(options = {}) {
const projectVersionInput = await promptQuestion(rl, chalk.cyan('Enter project version (default: 1.0.0): '));
const authorName = await promptQuestion(rl, chalk.cyan('Enter your name: '));
// Ask about shell aliases
const addAliasesInput = await promptQuestion(rl, chalk.cyan('Add shell aliases for task-master? (Y/n): '));
const addAliases = addAliasesInput.trim().toLowerCase() !== 'n';
// Set default version if not provided
const projectVersion = projectVersionInput.trim() ? projectVersionInput : '1.0.0';
@@ -349,6 +405,7 @@ async function initializeProject(options = {}) {
console.log(chalk.blue('Description:'), chalk.white(projectDescription));
console.log(chalk.blue('Version:'), chalk.white(projectVersion));
console.log(chalk.blue('Author:'), chalk.white(authorName || 'Not specified'));
console.log(chalk.blue('Add shell aliases:'), chalk.white(addAliases ? 'Yes' : 'No'));
const confirmInput = await promptQuestion(rl, chalk.yellow('\nDo you want to continue with these settings? (Y/n): '));
const shouldContinue = confirmInput.trim().toLowerCase() !== 'n';
@@ -367,6 +424,9 @@ async function initializeProject(options = {}) {
if (dryRun) {
log('info', 'DRY RUN MODE: No files will be modified');
log('info', 'Would create/update necessary project files');
if (addAliases) {
log('info', 'Would add shell aliases for task-master');
}
if (!skipInstall) {
log('info', 'Would install dependencies');
}
@@ -380,7 +440,7 @@ async function initializeProject(options = {}) {
}
// Create the project structure
createProjectStructure(projectName, projectDescription, projectVersion, authorName, skipInstall);
createProjectStructure(projectName, projectDescription, projectVersion, authorName, skipInstall, addAliases);
return {
projectName,
@@ -405,7 +465,7 @@ function promptQuestion(rl, question) {
}
// Function to create the project structure
function createProjectStructure(projectName, projectDescription, projectVersion, authorName, skipInstall) {
function createProjectStructure(projectName, projectDescription, projectVersion, authorName, skipInstall, addAliases) {
const targetDir = process.cwd();
log('info', `Initializing project in ${targetDir}`);
@@ -489,6 +549,9 @@ function createProjectStructure(projectName, projectDescription, projectVersion,
log('success', 'Created package.json');
}
// Setup MCP configuration for integration with Cursor
setupMCPConfiguration(targetDir, packageJson.name);
// Copy template files with replacements
const replacements = {
projectName,
@@ -571,6 +634,11 @@ function createProjectStructure(projectName, projectDescription, projectVersion,
}
));
// Add shell aliases if requested
if (addAliases) {
addShellAliases();
}
// Display next steps in a nice box
console.log(boxen(
chalk.cyan.bold('Things you can now do:') + '\n\n' +
@@ -596,6 +664,84 @@ function createProjectStructure(projectName, projectDescription, projectVersion,
));
}
// Function to setup MCP configuration for Cursor integration
function setupMCPConfiguration(targetDir, projectName) {
const mcpDirPath = path.join(targetDir, '.cursor');
const mcpJsonPath = path.join(mcpDirPath, 'mcp.json');
log('info', 'Setting up MCP configuration for Cursor integration...');
// Create .cursor directory if it doesn't exist
ensureDirectoryExists(mcpDirPath);
// New MCP config to be added - references the installed package
const newMCPServer = {
"task-master-ai": {
"command": "npx",
"args": [
"task-master-ai",
"mcp-server"
]
}
};
// Check if mcp.json already exists
if (fs.existsSync(mcpJsonPath)) {
log('info', 'MCP configuration file already exists, updating...');
try {
// Read existing config
const mcpConfig = JSON.parse(fs.readFileSync(mcpJsonPath, 'utf8'));
// Initialize mcpServers if it doesn't exist
if (!mcpConfig.mcpServers) {
mcpConfig.mcpServers = {};
}
// Add the task-master-ai server if it doesn't exist
if (!mcpConfig.mcpServers["task-master-ai"]) {
mcpConfig.mcpServers["task-master-ai"] = newMCPServer["task-master-ai"];
log('info', 'Added task-master-ai server to existing MCP configuration');
} else {
log('info', 'task-master-ai server already configured in mcp.json');
}
// Write the updated configuration
fs.writeFileSync(
mcpJsonPath,
JSON.stringify(mcpConfig, null, 4)
);
log('success', 'Updated MCP configuration file');
} catch (error) {
log('error', `Failed to update MCP configuration: ${error.message}`);
// Create a backup before potentially modifying
const backupPath = `${mcpJsonPath}.backup-${Date.now()}`;
if (fs.existsSync(mcpJsonPath)) {
fs.copyFileSync(mcpJsonPath, backupPath);
log('info', `Created backup of existing mcp.json at ${backupPath}`);
}
// Create new configuration
const newMCPConfig = {
"mcpServers": newMCPServer
};
fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
log('warn', 'Created new MCP configuration file (backup of original file was created if it existed)');
}
} else {
// If mcp.json doesn't exist, create it
const newMCPConfig = {
"mcpServers": newMCPServer
};
fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
log('success', 'Created MCP configuration file for Cursor integration');
}
// Add note to console about MCP integration
log('info', 'MCP server will use the installed task-master-ai package');
}
// Run the initialization if this script is executed directly
// The original check doesn't work with npx and global commands
// if (process.argv[1] === fileURLToPath(import.meta.url)) {
@@ -619,7 +765,8 @@ console.log('process.argv:', process.argv);
projectVersion: options.version || '1.0.0',
authorName: options.author || '',
dryRun: options.dryRun || false,
skipInstall: options.skipInstall || false
skipInstall: options.skipInstall || false,
addAliases: options.aliases || false
});
} else {
// Otherwise, prompt for input normally


@@ -8,6 +8,7 @@ import path from 'path';
import chalk from 'chalk';
import boxen from 'boxen';
import fs from 'fs';
import https from 'https';
import { CONFIG, log, readJSON } from './utils.js';
import {
@@ -22,7 +23,8 @@ import {
addTask,
addSubtask,
removeSubtask,
analyzeTaskComplexity
analyzeTaskComplexity,
updateTaskById
} from './task-manager.js';
import {
@@ -46,6 +48,14 @@ import {
* @param {Object} program - Commander program instance
*/
function registerCommands(programInstance) {
// Add global error handler for unknown options
programInstance.on('option:unknown', function(unknownOption) {
const commandName = this._name || 'unknown';
console.error(chalk.red(`Error: Unknown option '${unknownOption}'`));
console.error(chalk.yellow(`Run 'task-master ${commandName} --help' to see available options`));
process.exit(1);
});
// Default help
programInstance.on('--help', function() {
displayHelp();
@@ -135,6 +145,92 @@ function registerCommands(programInstance) {
await updateTasks(tasksPath, fromId, prompt, useResearch);
});
// updateTask command
programInstance
.command('update-task')
.description('Update a single task by ID with new information')
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
.option('-i, --id <id>', 'Task ID to update (required)')
.option('-p, --prompt <text>', 'Prompt explaining the changes or new context (required)')
.option('-r, --research', 'Use Perplexity AI for research-backed task updates')
.action(async (options) => {
try {
const tasksPath = options.file;
// Validate required parameters
if (!options.id) {
console.error(chalk.red('Error: --id parameter is required'));
console.log(chalk.yellow('Usage example: task-master update-task --id=23 --prompt="Update with new information"'));
process.exit(1);
}
// Parse the task ID and validate it's a number
const taskId = parseInt(options.id, 10);
if (isNaN(taskId) || taskId <= 0) {
console.error(chalk.red(`Error: Invalid task ID: ${options.id}. Task ID must be a positive integer.`));
console.log(chalk.yellow('Usage example: task-master update-task --id=23 --prompt="Update with new information"'));
process.exit(1);
}
if (!options.prompt) {
console.error(chalk.red('Error: --prompt parameter is required. Please provide information about the changes.'));
console.log(chalk.yellow('Usage example: task-master update-task --id=23 --prompt="Update with new information"'));
process.exit(1);
}
const prompt = options.prompt;
const useResearch = options.research || false;
// Validate tasks file exists
if (!fs.existsSync(tasksPath)) {
console.error(chalk.red(`Error: Tasks file not found at path: ${tasksPath}`));
if (tasksPath === 'tasks/tasks.json') {
console.log(chalk.yellow('Hint: Run task-master init or task-master parse-prd to create tasks.json first'));
} else {
console.log(chalk.yellow(`Hint: Check if the file path is correct: ${tasksPath}`));
}
process.exit(1);
}
console.log(chalk.blue(`Updating task ${taskId} with prompt: "${prompt}"`));
console.log(chalk.blue(`Tasks file: ${tasksPath}`));
if (useResearch) {
// Verify Perplexity API key exists if using research
if (!process.env.PERPLEXITY_API_KEY) {
console.log(chalk.yellow('Warning: PERPLEXITY_API_KEY environment variable is missing. Research-backed updates will not be available.'));
console.log(chalk.yellow('Falling back to Claude AI for task update.'));
} else {
console.log(chalk.blue('Using Perplexity AI for research-backed task update'));
}
}
const result = await updateTaskById(tasksPath, taskId, prompt, useResearch);
// If the task wasn't updated (e.g., if it was already marked as done)
if (!result) {
console.log(chalk.yellow('\nTask update was not completed. Review the messages above for details.'));
}
} catch (error) {
console.error(chalk.red(`Error: ${error.message}`));
// Provide more helpful error messages for common issues
if (error.message.includes('task') && error.message.includes('not found')) {
console.log(chalk.yellow('\nTo fix this issue:'));
console.log(' 1. Run task-master list to see all available task IDs');
console.log(' 2. Use a valid task ID with the --id parameter');
} else if (error.message.includes('API key')) {
console.log(chalk.yellow('\nThis error is related to API keys. Check your environment variables.'));
}
if (CONFIG.debug) {
console.error(error);
}
process.exit(1);
}
});
// generate command
programInstance
.command('generate')
@@ -437,15 +533,16 @@ function registerCommands(programInstance) {
.option('--details <text>', 'Implementation details for the new subtask')
.option('--dependencies <ids>', 'Comma-separated list of dependency IDs for the new subtask')
.option('-s, --status <status>', 'Status for the new subtask', 'pending')
.option('--no-generate', 'Skip regenerating task files')
.option('--skip-generate', 'Skip regenerating task files')
.action(async (options) => {
const tasksPath = options.file;
const parentId = options.parent;
const existingTaskId = options.taskId;
const generateFiles = options.generate;
const generateFiles = !options.skipGenerate;
if (!parentId) {
console.error(chalk.red('Error: --parent parameter is required. Please provide a parent task ID.'));
showAddSubtaskHelp();
process.exit(1);
}
@@ -507,61 +604,127 @@ function registerCommands(programInstance) {
console.error(chalk.red(`Error: ${error.message}`));
process.exit(1);
}
})
.on('error', function(err) {
console.error(chalk.red(`Error: ${err.message}`));
showAddSubtaskHelp();
process.exit(1);
});
// Helper function to show add-subtask command help
function showAddSubtaskHelp() {
console.log(boxen(
chalk.white.bold('Add Subtask Command Help') + '\n\n' +
chalk.cyan('Usage:') + '\n' +
` task-master add-subtask --parent=<id> [options]\n\n` +
chalk.cyan('Options:') + '\n' +
' -p, --parent <id> Parent task ID (required)\n' +
' -i, --task-id <id> Existing task ID to convert to subtask\n' +
' -t, --title <title> Title for the new subtask\n' +
' -d, --description <text> Description for the new subtask\n' +
' --details <text> Implementation details for the new subtask\n' +
' --dependencies <ids> Comma-separated list of dependency IDs\n' +
' -s, --status <status> Status for the new subtask (default: "pending")\n' +
' -f, --file <file> Path to the tasks file (default: "tasks/tasks.json")\n' +
' --skip-generate Skip regenerating task files\n\n' +
chalk.cyan('Examples:') + '\n' +
' task-master add-subtask --parent=5 --task-id=8\n' +
' task-master add-subtask -p 5 -t "Implement login UI" -d "Create the login form"',
{ padding: 1, borderColor: 'blue', borderStyle: 'round' }
));
}
// remove-subtask command
programInstance
.command('remove-subtask')
.description('Remove a subtask from its parent task')
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
.option('-i, --id <id>', 'Subtask ID to remove in format "parentId.subtaskId" (required)')
.option('-i, --id <id>', 'Subtask ID(s) to remove in format "parentId.subtaskId" (can be comma-separated for multiple subtasks)')
.option('-c, --convert', 'Convert the subtask to a standalone task instead of deleting it')
.option('--no-generate', 'Skip regenerating task files')
.option('--skip-generate', 'Skip regenerating task files')
.action(async (options) => {
const tasksPath = options.file;
const subtaskId = options.id;
const subtaskIds = options.id;
const convertToTask = options.convert || false;
const generateFiles = options.generate;
const generateFiles = !options.skipGenerate;
if (!subtaskId) {
console.error(chalk.red('Error: --id parameter is required. Please provide a subtask ID in format "parentId.subtaskId".'));
if (!subtaskIds) {
console.error(chalk.red('Error: --id parameter is required. Please provide subtask ID(s) in format "parentId.subtaskId".'));
showRemoveSubtaskHelp();
process.exit(1);
}
try {
console.log(chalk.blue(`Removing subtask ${subtaskId}...`));
if (convertToTask) {
console.log(chalk.blue('The subtask will be converted to a standalone task'));
}
// Split by comma to support multiple subtask IDs
const subtaskIdArray = subtaskIds.split(',').map(id => id.trim());
const result = await removeSubtask(tasksPath, subtaskId, convertToTask, generateFiles);
if (convertToTask && result) {
// Display success message and next steps for converted task
console.log(boxen(
chalk.white.bold(`Subtask ${subtaskId} Converted to Task #${result.id}`) + '\n\n' +
chalk.white(`Title: ${result.title}`) + '\n' +
chalk.white(`Status: ${getStatusWithColor(result.status)}`) + '\n' +
chalk.white(`Dependencies: ${result.dependencies.join(', ')}`) + '\n\n' +
chalk.white.bold('Next Steps:') + '\n' +
chalk.cyan(`1. Run ${chalk.yellow(`task-master show ${result.id}`)} to see details of the new task`) + '\n' +
chalk.cyan(`2. Run ${chalk.yellow(`task-master set-status --id=${result.id} --status=in-progress`)} to start working on it`),
{ padding: 1, borderColor: 'green', borderStyle: 'round', margin: { top: 1 } }
));
} else {
// Display success message for deleted subtask
console.log(boxen(
chalk.white.bold(`Subtask ${subtaskId} Removed`) + '\n\n' +
chalk.white('The subtask has been successfully deleted.'),
{ padding: 1, borderColor: 'green', borderStyle: 'round', margin: { top: 1 } }
));
for (const subtaskId of subtaskIdArray) {
// Validate subtask ID format
if (!subtaskId.includes('.')) {
console.error(chalk.red(`Error: Subtask ID "${subtaskId}" must be in format "parentId.subtaskId"`));
showRemoveSubtaskHelp();
process.exit(1);
}
console.log(chalk.blue(`Removing subtask ${subtaskId}...`));
if (convertToTask) {
console.log(chalk.blue('The subtask will be converted to a standalone task'));
}
const result = await removeSubtask(tasksPath, subtaskId, convertToTask, generateFiles);
if (convertToTask && result) {
// Display success message and next steps for converted task
console.log(boxen(
chalk.white.bold(`Subtask ${subtaskId} Converted to Task #${result.id}`) + '\n\n' +
chalk.white(`Title: ${result.title}`) + '\n' +
chalk.white(`Status: ${getStatusWithColor(result.status)}`) + '\n' +
chalk.white(`Dependencies: ${result.dependencies.join(', ')}`) + '\n\n' +
chalk.white.bold('Next Steps:') + '\n' +
chalk.cyan(`1. Run ${chalk.yellow(`task-master show ${result.id}`)} to see details of the new task`) + '\n' +
chalk.cyan(`2. Run ${chalk.yellow(`task-master set-status --id=${result.id} --status=in-progress`)} to start working on it`),
{ padding: 1, borderColor: 'green', borderStyle: 'round', margin: { top: 1 } }
));
} else {
// Display success message for deleted subtask
console.log(boxen(
chalk.white.bold(`Subtask ${subtaskId} Removed`) + '\n\n' +
chalk.white('The subtask has been successfully deleted.'),
{ padding: 1, borderColor: 'green', borderStyle: 'round', margin: { top: 1 } }
));
}
}
} catch (error) {
console.error(chalk.red(`Error: ${error.message}`));
showRemoveSubtaskHelp();
process.exit(1);
}
})
.on('error', function(err) {
console.error(chalk.red(`Error: ${err.message}`));
showRemoveSubtaskHelp();
process.exit(1);
});
// Helper function to show remove-subtask command help
function showRemoveSubtaskHelp() {
console.log(boxen(
chalk.white.bold('Remove Subtask Command Help') + '\n\n' +
chalk.cyan('Usage:') + '\n' +
` task-master remove-subtask --id=<parentId.subtaskId> [options]\n\n` +
chalk.cyan('Options:') + '\n' +
' -i, --id <id> Subtask ID(s) to remove in format "parentId.subtaskId" (can be comma-separated, required)\n' +
' -c, --convert Convert the subtask to a standalone task instead of deleting it\n' +
' -f, --file <file> Path to the tasks file (default: "tasks/tasks.json")\n' +
' --skip-generate Skip regenerating task files\n\n' +
chalk.cyan('Examples:') + '\n' +
' task-master remove-subtask --id=5.2\n' +
' task-master remove-subtask --id=5.2,6.3,7.1\n' +
' task-master remove-subtask --id=5.2 --convert',
{ padding: 1, borderColor: 'blue', borderStyle: 'round' }
));
}
// init command (documentation only, implementation is in init.js)
programInstance
.command('init')
@@ -634,6 +797,132 @@ function setupCLI() {
return programInstance;
}
/**
* Check for newer version of task-master-ai
* @returns {Promise<{currentVersion: string, latestVersion: string, needsUpdate: boolean}>}
*/
async function checkForUpdate() {
// Get current version from package.json
let currentVersion = CONFIG.projectVersion;
try {
// Try to get the version from the installed package
const packageJsonPath = path.join(process.cwd(), 'node_modules', 'task-master-ai', 'package.json');
if (fs.existsSync(packageJsonPath)) {
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
currentVersion = packageJson.version;
}
} catch (error) {
// Silently fail and use default
log('debug', `Error reading current package version: ${error.message}`);
}
return new Promise((resolve) => {
// Get the latest version from npm registry
const options = {
hostname: 'registry.npmjs.org',
path: '/task-master-ai',
method: 'GET',
headers: {
'Accept': 'application/vnd.npm.install-v1+json' // Lightweight response
}
};
const req = https.request(options, (res) => {
let data = '';
res.on('data', (chunk) => {
data += chunk;
});
res.on('end', () => {
try {
const npmData = JSON.parse(data);
const latestVersion = npmData['dist-tags']?.latest || currentVersion;
// Compare versions
const needsUpdate = compareVersions(currentVersion, latestVersion) < 0;
resolve({
currentVersion,
latestVersion,
needsUpdate
});
} catch (error) {
log('debug', `Error parsing npm response: ${error.message}`);
resolve({
currentVersion,
latestVersion: currentVersion,
needsUpdate: false
});
}
});
});
req.on('error', (error) => {
log('debug', `Error checking for updates: ${error.message}`);
resolve({
currentVersion,
latestVersion: currentVersion,
needsUpdate: false
});
});
// Set a timeout to avoid hanging if npm is slow
req.setTimeout(3000, () => {
req.destroy(); // abort() is deprecated in recent Node versions; destroy() tears down the request the same way
log('debug', 'Update check timed out');
resolve({
currentVersion,
latestVersion: currentVersion,
needsUpdate: false
});
});
req.end();
});
}
/**
* Compare semantic versions
* @param {string} v1 - First version
* @param {string} v2 - Second version
* @returns {number} -1 if v1 < v2, 0 if v1 = v2, 1 if v1 > v2
*/
function compareVersions(v1, v2) {
const v1Parts = v1.split('.').map(p => parseInt(p, 10));
const v2Parts = v2.split('.').map(p => parseInt(p, 10));
for (let i = 0; i < Math.max(v1Parts.length, v2Parts.length); i++) {
const v1Part = v1Parts[i] || 0;
const v2Part = v2Parts[i] || 0;
if (v1Part < v2Part) return -1;
if (v1Part > v2Part) return 1;
}
return 0;
}
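// Note: this numeric comparison ignores pre-release tags. '1.0.0-beta'.split('.')
// yields ['1', '0', '0-beta'] and parseInt('0-beta', 10) returns 0, so
// compareVersions('1.0.0-beta', '1.0.0') reports the versions as equal even though
// the pre-release should sort lower. If pre-release ordering ever matters here,
// the 'semver' package (suggested in task #38) handles it correctly, e.g.
// semver.lt('1.0.0-beta', '1.0.0') === true. That package is not a dependency of
// this diff; this comment is only a pointer.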
/**
* Display upgrade notification message
* @param {string} currentVersion - Current version
* @param {string} latestVersion - Latest version
*/
function displayUpgradeNotification(currentVersion, latestVersion) {
const message = boxen(
`${chalk.blue.bold('Update Available!')} ${chalk.dim(currentVersion)} → ${chalk.green(latestVersion)}\n\n` +
`Run ${chalk.cyan('npm i task-master-ai@latest -g')} to update to the latest version with new features and bug fixes.`,
{
padding: 1,
margin: { top: 1, bottom: 1 },
borderColor: 'yellow',
borderStyle: 'round'
}
);
console.log(message);
}
/**
* Parse arguments and run the CLI
* @param {Array} argv - Command-line arguments
@@ -651,9 +940,18 @@ async function runCLI(argv = process.argv) {
process.exit(0);
}
// Start the update check in the background - don't await yet
const updateCheckPromise = checkForUpdate();
// Setup and parse
const programInstance = setupCLI();
await programInstance.parseAsync(argv);
// After command execution, check if an update is available
const updateInfo = await updateCheckPromise;
if (updateInfo.needsUpdate) {
displayUpgradeNotification(updateInfo.currentVersion, updateInfo.latestVersion);
}
} catch (error) {
console.error(chalk.red(`Error: ${error.message}`));
@@ -668,5 +966,8 @@ async function runCLI(argv = process.argv) {
export {
registerCommands,
setupCLI,
runCLI
runCLI,
checkForUpdate,
compareVersions,
displayUpgradeNotification
};
View File
@@ -181,6 +181,16 @@ async function updateTasks(tasksPath, fromId, prompt, useResearch = false) {
console.log(table.toString());
// Display a message about how completed subtasks are handled
console.log(boxen(
chalk.cyan.bold('How Completed Subtasks Are Handled:') + '\n\n' +
chalk.white('• Subtasks marked as "done" or "completed" will be preserved\n') +
chalk.white('• New subtasks will build upon what has already been completed\n') +
chalk.white('• If completed work needs revision, a new subtask will be created instead of modifying done items\n') +
chalk.white('• This approach maintains a clear record of completed work and new requirements'),
{ padding: 1, borderColor: 'blue', borderStyle: 'round', margin: { top: 1, bottom: 1 } }
));
// Build the system prompt
const systemPrompt = `You are an AI assistant helping to update software development tasks based on new context.
You will be given a set of tasks and a prompt describing changes or new implementation details.
@@ -192,6 +202,11 @@ Guidelines:
3. Do not change anything unnecessarily - just adapt what needs to change based on the prompt
4. You should return ALL the tasks in order, not just the modified ones
5. Return a complete valid JSON object with the updated tasks array
6. VERY IMPORTANT: Preserve all subtasks marked as "done" or "completed" - do not modify their content
7. For tasks with completed subtasks, build upon what has already been done rather than rewriting everything
8. If an existing completed subtask needs to be changed/undone based on the new context, DO NOT modify it directly
9. Instead, add a new subtask that clearly indicates what needs to be changed or replaced
10. Use the existence of completed subtasks as an opportunity to make new subtasks more specific and targeted
The changes described in the prompt should be applied to ALL tasks in the list.`;
@@ -213,7 +228,7 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
messages: [
{
role: "system",
content: `${systemPrompt}\n\nAdditionally, please research the latest best practices, implementation details, and considerations when updating these tasks. Use your online search capabilities to gather relevant information.`
content: `${systemPrompt}\n\nAdditionally, please research the latest best practices, implementation details, and considerations when updating these tasks. Use your online search capabilities to gather relevant information. Remember to strictly follow the guidelines about preserving completed subtasks and building upon what has already been done rather than modifying or replacing it.`
},
{
role: "user",
@@ -223,6 +238,8 @@ ${taskData}
Please update these tasks based on the following new context:
${prompt}
IMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.
Return only the updated tasks as a valid JSON array.`
}
],
@@ -272,6 +289,8 @@ ${taskData}
Please update these tasks based on the following new context:
${prompt}
IMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.
Return only the updated tasks as a valid JSON array.`
}
],
@@ -339,6 +358,380 @@ Return only the updated tasks as a valid JSON array.`
}
}
/**
* Update a single task by ID
* @param {string} tasksPath - Path to the tasks.json file
* @param {number} taskId - Task ID to update
* @param {string} prompt - Prompt with new context
* @param {boolean} useResearch - Whether to use Perplexity AI for research
* @returns {Object} - Updated task data or null if task wasn't updated
*/
async function updateTaskById(tasksPath, taskId, prompt, useResearch = false) {
try {
log('info', `Updating single task ${taskId} with prompt: "${prompt}"`);
// Validate task ID is a positive integer
if (!Number.isInteger(taskId) || taskId <= 0) {
throw new Error(`Invalid task ID: ${taskId}. Task ID must be a positive integer.`);
}
// Validate prompt
if (!prompt || typeof prompt !== 'string' || prompt.trim() === '') {
throw new Error('Prompt cannot be empty. Please provide context for the task update.');
}
// Validate research flag
if (useResearch && (!perplexity || !process.env.PERPLEXITY_API_KEY)) {
log('warn', 'Perplexity AI is not available. Falling back to Claude AI.');
console.log(chalk.yellow('Perplexity AI is not available (API key may be missing). Falling back to Claude AI.'));
useResearch = false;
}
// Validate tasks file exists
if (!fs.existsSync(tasksPath)) {
throw new Error(`Tasks file not found at path: ${tasksPath}`);
}
// Read the tasks file
const data = readJSON(tasksPath);
if (!data || !data.tasks) {
throw new Error(`No valid tasks found in ${tasksPath}. The file may be corrupted or have an invalid format.`);
}
// Find the specific task to update
const taskToUpdate = data.tasks.find(task => task.id === taskId);
if (!taskToUpdate) {
throw new Error(`Task with ID ${taskId} not found. Please verify the task ID and try again.`);
}
// Check if task is already completed
if (taskToUpdate.status === 'done' || taskToUpdate.status === 'completed') {
log('warn', `Task ${taskId} is already marked as done and cannot be updated`);
console.log(boxen(
chalk.yellow(`Task ${taskId} is already marked as ${taskToUpdate.status} and cannot be updated.`) + '\n\n' +
chalk.white('Completed tasks are locked to maintain consistency. To modify a completed task, you must first:') + '\n' +
chalk.white('1. Change its status to "pending" or "in-progress"') + '\n' +
chalk.white('2. Then run the update-task command'),
{ padding: 1, borderColor: 'yellow', borderStyle: 'round' }
));
return null;
}
// Show the task that will be updated
const table = new Table({
head: [
chalk.cyan.bold('ID'),
chalk.cyan.bold('Title'),
chalk.cyan.bold('Status')
],
colWidths: [5, 60, 10]
});
table.push([
taskToUpdate.id,
truncate(taskToUpdate.title, 57),
getStatusWithColor(taskToUpdate.status)
]);
console.log(boxen(
chalk.white.bold(`Updating Task #${taskId}`),
{ padding: 1, borderColor: 'blue', borderStyle: 'round', margin: { top: 1, bottom: 0 } }
));
console.log(table.toString());
// Display a message about how completed subtasks are handled
console.log(boxen(
chalk.cyan.bold('How Completed Subtasks Are Handled:') + '\n\n' +
chalk.white('• Subtasks marked as "done" or "completed" will be preserved\n') +
chalk.white('• New subtasks will build upon what has already been completed\n') +
chalk.white('• If completed work needs revision, a new subtask will be created instead of modifying done items\n') +
chalk.white('• This approach maintains a clear record of completed work and new requirements'),
{ padding: 1, borderColor: 'blue', borderStyle: 'round', margin: { top: 1, bottom: 1 } }
));
// Build the system prompt
const systemPrompt = `You are an AI assistant helping to update a software development task based on new context.
You will be given a task and a prompt describing changes or new implementation details.
Your job is to update the task to reflect these changes, while preserving its basic structure.
Guidelines:
1. VERY IMPORTANT: NEVER change the title of the task - keep it exactly as is
2. Maintain the same ID, status, and dependencies unless specifically mentioned in the prompt
3. Update the description, details, and test strategy to reflect the new information
4. Do not change anything unnecessarily - just adapt what needs to change based on the prompt
5. Return a complete valid JSON object representing the updated task
6. VERY IMPORTANT: Preserve all subtasks marked as "done" or "completed" - do not modify their content
7. For tasks with completed subtasks, build upon what has already been done rather than rewriting everything
8. If an existing completed subtask needs to be changed/undone based on the new context, DO NOT modify it directly
9. Instead, add a new subtask that clearly indicates what needs to be changed or replaced
10. Use the existence of completed subtasks as an opportunity to make new subtasks more specific and targeted
11. Ensure any new subtasks have unique IDs that don't conflict with existing ones
The changes described in the prompt should be thoughtfully applied to make the task more accurate and actionable.`;
const taskData = JSON.stringify(taskToUpdate, null, 2);
let updatedTask;
const loadingIndicator = startLoadingIndicator(useResearch
? 'Updating task with Perplexity AI research...'
: 'Updating task with Claude AI...');
try {
if (useResearch) {
log('info', 'Using Perplexity AI for research-backed task update');
// Verify Perplexity API key exists
if (!process.env.PERPLEXITY_API_KEY) {
throw new Error('PERPLEXITY_API_KEY environment variable is missing but --research flag was used.');
}
try {
// Call Perplexity AI
const perplexityModel = process.env.PERPLEXITY_MODEL || 'sonar-pro';
const result = await perplexity.chat.completions.create({
model: perplexityModel,
messages: [
{
role: "system",
content: `${systemPrompt}\n\nAdditionally, please research the latest best practices, implementation details, and considerations when updating this task. Use your online search capabilities to gather relevant information. Remember to strictly follow the guidelines about preserving completed subtasks and building upon what has already been done rather than modifying or replacing it.`
},
{
role: "user",
content: `Here is the task to update:
${taskData}
Please update this task based on the following new context:
${prompt}
IMPORTANT: In the task JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.
Return only the updated task as a valid JSON object.`
}
],
temperature: parseFloat(process.env.TEMPERATURE || CONFIG.temperature),
max_tokens: parseInt(process.env.MAX_TOKENS || CONFIG.maxTokens),
});
const responseText = result.choices[0].message.content;
// Extract JSON from response
const jsonStart = responseText.indexOf('{');
const jsonEnd = responseText.lastIndexOf('}');
if (jsonStart === -1 || jsonEnd === -1) {
throw new Error("Could not find valid JSON object in Perplexity's response. The response may be malformed.");
}
const jsonText = responseText.substring(jsonStart, jsonEnd + 1);
try {
updatedTask = JSON.parse(jsonText);
} catch (parseError) {
throw new Error(`Failed to parse Perplexity response as JSON: ${parseError.message}\nResponse fragment: ${jsonText.substring(0, 100)}...`);
}
} catch (perplexityError) {
throw new Error(`Perplexity API error: ${perplexityError.message}`);
}
} else {
// Call Claude to update the task with streaming enabled
let responseText = '';
let streamingInterval = null;
try {
// Verify Anthropic API key exists
if (!process.env.ANTHROPIC_API_KEY) {
throw new Error('ANTHROPIC_API_KEY environment variable is missing. Required for task updates.');
}
// Update loading indicator to show streaming progress
let dotCount = 0;
const readline = await import('readline');
streamingInterval = setInterval(() => {
readline.cursorTo(process.stdout, 0);
process.stdout.write(`Receiving streaming response from Claude${'.'.repeat(dotCount)}`);
dotCount = (dotCount + 1) % 4;
}, 500);
// Use streaming API call
const stream = await anthropic.messages.create({
model: CONFIG.model,
max_tokens: CONFIG.maxTokens,
temperature: CONFIG.temperature,
system: systemPrompt,
messages: [
{
role: 'user',
content: `Here is the task to update:
${taskData}
Please update this task based on the following new context:
${prompt}
IMPORTANT: In the task JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.
Return only the updated task as a valid JSON object.`
}
],
stream: true
});
// Process the stream
for await (const chunk of stream) {
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
responseText += chunk.delta.text;
}
}
if (streamingInterval) clearInterval(streamingInterval);
log('info', "Completed streaming response from Claude API!");
// Extract JSON from response
const jsonStart = responseText.indexOf('{');
const jsonEnd = responseText.lastIndexOf('}');
if (jsonStart === -1 || jsonEnd === -1) {
throw new Error("Could not find valid JSON object in Claude's response. The response may be malformed.");
}
const jsonText = responseText.substring(jsonStart, jsonEnd + 1);
try {
updatedTask = JSON.parse(jsonText);
} catch (parseError) {
throw new Error(`Failed to parse Claude response as JSON: ${parseError.message}\nResponse fragment: ${jsonText.substring(0, 100)}...`);
}
} catch (claudeError) {
if (streamingInterval) clearInterval(streamingInterval);
throw new Error(`Claude API error: ${claudeError.message}`);
}
}
// Validation of the updated task
if (!updatedTask || typeof updatedTask !== 'object') {
throw new Error('Received invalid task object from AI. The response did not contain a valid task.');
}
// Ensure critical fields exist
if (!updatedTask.title || !updatedTask.description) {
throw new Error('Updated task is missing required fields (title or description).');
}
// Ensure ID is preserved
if (updatedTask.id !== taskId) {
log('warn', `Task ID was modified in the AI response. Restoring original ID ${taskId}.`);
updatedTask.id = taskId;
}
// Ensure status is preserved unless explicitly changed in prompt
if (updatedTask.status !== taskToUpdate.status && !prompt.toLowerCase().includes('status')) {
log('warn', `Task status was modified without explicit instruction. Restoring original status '${taskToUpdate.status}'.`);
updatedTask.status = taskToUpdate.status;
}
// Ensure completed subtasks are preserved
if (taskToUpdate.subtasks && taskToUpdate.subtasks.length > 0) {
if (!updatedTask.subtasks) {
log('warn', 'Subtasks were removed in the AI response. Restoring original subtasks.');
updatedTask.subtasks = taskToUpdate.subtasks;
} else {
// Check for each completed subtask
const completedSubtasks = taskToUpdate.subtasks.filter(
st => st.status === 'done' || st.status === 'completed'
);
for (const completedSubtask of completedSubtasks) {
const updatedSubtask = updatedTask.subtasks.find(st => st.id === completedSubtask.id);
// If completed subtask is missing or modified, restore it
if (!updatedSubtask) {
log('warn', `Completed subtask ${completedSubtask.id} was removed. Restoring it.`);
updatedTask.subtasks.push(completedSubtask);
} else if (
updatedSubtask.title !== completedSubtask.title ||
updatedSubtask.description !== completedSubtask.description ||
updatedSubtask.details !== completedSubtask.details ||
updatedSubtask.status !== completedSubtask.status
) {
log('warn', `Completed subtask ${completedSubtask.id} was modified. Restoring original.`);
// Find and replace the modified subtask
const index = updatedTask.subtasks.findIndex(st => st.id === completedSubtask.id);
if (index !== -1) {
updatedTask.subtasks[index] = completedSubtask;
}
}
}
// Ensure no duplicate subtask IDs
const subtaskIds = new Set();
const uniqueSubtasks = [];
for (const subtask of updatedTask.subtasks) {
if (!subtaskIds.has(subtask.id)) {
subtaskIds.add(subtask.id);
uniqueSubtasks.push(subtask);
} else {
log('warn', `Duplicate subtask ID ${subtask.id} found. Removing duplicate.`);
}
}
updatedTask.subtasks = uniqueSubtasks;
}
}
// Update the task in the original data
const index = data.tasks.findIndex(t => t.id === taskId);
if (index !== -1) {
data.tasks[index] = updatedTask;
} else {
throw new Error(`Task with ID ${taskId} not found in tasks array.`);
}
// Write the updated tasks to the file
writeJSON(tasksPath, data);
log('success', `Successfully updated task ${taskId}`);
// Generate individual task files
await generateTaskFiles(tasksPath, path.dirname(tasksPath));
console.log(boxen(
chalk.green(`Successfully updated task #${taskId}`) + '\n\n' +
chalk.white.bold('Updated Title:') + ' ' + updatedTask.title,
{ padding: 1, borderColor: 'green', borderStyle: 'round' }
));
// Return the updated task for testing purposes
return updatedTask;
} finally {
stopLoadingIndicator(loadingIndicator);
}
} catch (error) {
log('error', `Error updating task: ${error.message}`);
console.error(chalk.red(`Error: ${error.message}`));
// Provide more helpful error messages for common issues
if (error.message.includes('ANTHROPIC_API_KEY')) {
console.log(chalk.yellow('\nTo fix this issue, set your Anthropic API key:'));
console.log(' export ANTHROPIC_API_KEY=your_api_key_here');
} else if (error.message.includes('PERPLEXITY_API_KEY')) {
console.log(chalk.yellow('\nTo fix this issue:'));
console.log(' 1. Set your Perplexity API key: export PERPLEXITY_API_KEY=your_api_key_here');
console.log(' 2. Or run without the research flag: task-master update-task --id=<id> --prompt="..."');
} else if (error.message.includes('Task with ID') && error.message.includes('not found')) {
console.log(chalk.yellow('\nTo fix this issue:'));
console.log(' 1. Run task-master list to see all available task IDs');
console.log(' 2. Use a valid task ID with the --id parameter');
}
if (CONFIG.debug) {
console.error(error);
}
return null;
}
}
/**
* Generate individual task files from tasks.json
* @param {string} tasksPath - Path to the tasks.json file
@@ -2580,6 +2973,7 @@ async function removeSubtask(tasksPath, subtaskId, convertToTask = false, genera
export {
parsePRD,
updateTasks,
updateTaskById,
generateTaskFiles,
setTaskStatus,
updateSingleTaskStatus,
View File
@@ -760,7 +760,7 @@ async function displayTaskById(tasksPath, taskId) {
const availableWidth = process.stdout.columns - 10 || 100; // Default to 100 if can't detect
// Define percentage-based column widths
const idWidthPct = 8;
const idWidthPct = 10;
const statusWidthPct = 15;
const depsWidthPct = 25;
const titleWidthPct = 100 - idWidthPct - statusWidthPct - depsWidthPct;
View File
@@ -1,6 +1,6 @@
# Task ID: 1
# Title: Implement Task Data Structure
# Status: done
# Status: in-progress
# Dependencies: None
# Priority: high
# Description: Design and implement the core tasks.json structure that will serve as the single source of truth for the system.
156
tasks/task_034.txt Normal file
View File
@@ -0,0 +1,156 @@
# Task ID: 34
# Title: Implement updateTask Command for Single Task Updates
# Status: done
# Dependencies: None
# Priority: high
# Description: Create a new command that allows updating a specific task by ID using AI-driven refinement while preserving completed subtasks and supporting all existing update command options.
# Details:
Implement a new command called 'updateTask' that focuses on updating a single task rather than all tasks from an ID onwards. The implementation should:
1. Accept a single task ID as a required parameter
2. Use the same AI-driven approach as the existing update command to refine the task
3. Preserve the completion status of any subtasks that were previously marked as complete
4. Support all options from the existing update command including:
- The research flag for Perplexity integration
- Any formatting or refinement options
- Task context options
5. Update the CLI help documentation to include this new command
6. Ensure the command follows the same pattern as other commands in the codebase
7. Add appropriate error handling for cases where the specified task ID doesn't exist
8. Implement the ability to update task title, description, and details separately if needed
9. Ensure the command returns appropriate success/failure messages
10. Optimize the implementation to only process the single task rather than scanning through all tasks
The command should reuse existing AI prompt templates where possible but modify them to focus on refining a single task rather than multiple tasks.
# Test Strategy:
Testing should verify the following aspects:
1. **Basic Functionality Test**: Verify that the command successfully updates a single task when given a valid task ID
2. **Preservation Test**: Create a task with completed subtasks, update it, and verify the completion status remains intact
3. **Research Flag Test**: Test the command with the research flag and verify it correctly integrates with Perplexity
4. **Error Handling Tests**:
- Test with non-existent task ID and verify appropriate error message
- Test with invalid parameters and verify helpful error messages
5. **Integration Test**: Run a complete workflow that creates a task, updates it with updateTask, and then verifies the changes are persisted
6. **Comparison Test**: Compare the results of updating a single task with updateTask versus using the original update command on the same task to ensure consistent quality
7. **Performance Test**: Measure execution time compared to the full update command to verify efficiency gains
8. **CLI Help Test**: Verify the command appears correctly in help documentation with appropriate descriptions
Create unit tests for the core functionality and integration tests for the complete workflow. Document any edge cases discovered during testing.
# Subtasks:
## 1. Create updateTaskById function in task-manager.js [done]
### Dependencies: None
### Description: Implement a new function in task-manager.js that focuses on updating a single task by ID using AI-driven refinement while preserving completed subtasks.
### Details:
Implementation steps:
1. Create a new `updateTaskById` function in task-manager.js that accepts parameters: taskId, options object (containing research flag, formatting options, etc.)
2. Implement logic to find a specific task by ID in the tasks array
3. Add appropriate error handling for cases where the task ID doesn't exist (throw a custom error)
4. Reuse existing AI prompt templates but modify them to focus on refining a single task
5. Implement logic to preserve completion status of subtasks that were previously marked as complete
6. Add support for updating task title, description, and details separately based on options
7. Optimize the implementation to only process the single task rather than scanning through all tasks
8. Return the updated task and appropriate success/failure messages
Testing approach:
- Unit test the function with various scenarios including:
- Valid task ID with different update options
- Non-existent task ID
- Task with completed subtasks to verify preservation
- Different combinations of update options
## 2. Implement updateTask command in commands.js [done]
### Dependencies: 34.1
### Description: Create a new command called 'updateTask' in commands.js that leverages the updateTaskById function to update a specific task by ID.
### Details:
Implementation steps:
1. Create a new command object for 'updateTask' in commands.js following the Command pattern
2. Define command parameters including a required taskId parameter
3. Support all options from the existing update command:
- Research flag for Perplexity integration
- Formatting and refinement options
- Task context options
4. Implement the command handler function that calls the updateTaskById function from task-manager.js
5. Add appropriate error handling to catch and display user-friendly error messages
6. Ensure the command follows the same pattern as other commands in the codebase
7. Implement proper validation of input parameters
8. Format and return appropriate success/failure messages to the user
Testing approach:
- Unit test the command handler with various input combinations
- Test error handling scenarios
- Verify command options are correctly passed to the updateTaskById function
## 3. Add comprehensive error handling and validation [done]
### Dependencies: 34.1, 34.2
### Description: Implement robust error handling and validation for the updateTask command to ensure proper user feedback and system stability.
### Details:
Implementation steps:
1. Create custom error types for different failure scenarios (TaskNotFoundError, ValidationError, etc.; see the sketch at the end of this subtask)
2. Implement input validation for the taskId parameter and all options
3. Add proper error handling for AI service failures with appropriate fallback mechanisms
4. Implement concurrency handling to prevent conflicts when multiple updates occur simultaneously
5. Add comprehensive logging for debugging and auditing purposes
6. Ensure all error messages are user-friendly and actionable
7. Implement proper HTTP status codes for API responses if applicable
8. Add validation to ensure the task exists before attempting updates
Testing approach:
- Test various error scenarios including invalid inputs, non-existent tasks, and API failures
- Verify error messages are clear and helpful
- Test concurrency scenarios with multiple simultaneous updates
- Verify logging captures appropriate information for troubleshooting
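A minimal sketch of the custom error types this subtask proposes, using the class names from the description above; these classes do not exist in the codebase yet, and the constructor shapes are assumptions:

```javascript
// Sketch only: custom error types proposed in subtask 34.3.
class TaskNotFoundError extends Error {
  constructor(taskId) {
    super(`Task with ID ${taskId} not found. Please verify the task ID and try again.`);
    this.name = 'TaskNotFoundError';
    this.taskId = taskId;
  }
}

class ValidationError extends Error {
  constructor(message) {
    super(message);
    this.name = 'ValidationError';
  }
}

// Callers can then branch on error type instead of matching message strings:
// if (error instanceof TaskNotFoundError) { ... }
```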
## 4. Write comprehensive tests for updateTask command [done]
### Dependencies: 34.1, 34.2, 34.3
### Description: Create a comprehensive test suite for the updateTask command to ensure it works correctly in all scenarios and maintains backward compatibility.
### Details:
Implementation steps:
1. Create unit tests for the updateTaskById function in task-manager.js
- Test finding and updating tasks with various IDs
- Test preservation of completed subtasks
- Test different update options combinations
- Test error handling for non-existent tasks
2. Create unit tests for the updateTask command in commands.js
- Test command parameter parsing
- Test option handling
- Test error scenarios and messages
3. Create integration tests that verify the end-to-end flow
- Test the command with actual AI service integration
- Test with mock AI responses for predictable testing
4. Implement test fixtures and mocks for consistent testing
5. Add performance tests to ensure the command is efficient
6. Test edge cases such as empty tasks, tasks with many subtasks, etc.
Testing approach:
- Use Jest or similar testing framework
- Implement mocks for external dependencies like AI services
- Create test fixtures for consistent test data
- Use snapshot testing for command output verification
## 5. Update CLI documentation and help text [done]
### Dependencies: 34.2
### Description: Update the CLI help documentation to include the new updateTask command and ensure users understand its purpose and options.
### Details:
Implementation steps:
1. Add comprehensive help text for the updateTask command including:
- Command description
- Required and optional parameters
- Examples of usage
- Description of all supported options
2. Update the main CLI help documentation to include the new command
3. Add the command to any relevant command groups or categories
4. Create usage examples that demonstrate common scenarios
5. Update README.md and other documentation files to include information about the new command
6. Add inline code comments explaining the implementation details
7. Update any API documentation if applicable
8. Create or update user guides with the new functionality
Testing approach:
- Verify help text is displayed correctly when running `--help`
- Review documentation for clarity and completeness
- Have team members review the documentation for usability
- Test examples to ensure they work as documented
48
tasks/task_035.txt Normal file
View File
@@ -0,0 +1,48 @@
# Task ID: 35
# Title: Integrate Grok3 API for Research Capabilities
# Status: pending
# Dependencies: None
# Priority: medium
# Description: Replace the current Perplexity API integration with Grok3 API for all research-related functionalities while maintaining existing feature parity.
# Details:
This task involves migrating from Perplexity to Grok3 API for research capabilities throughout the application. Implementation steps include:
1. Create a new API client module for Grok3 in `src/api/grok3.ts` that handles authentication, request formatting, and response parsing
2. Update the research service layer to use the new Grok3 client instead of Perplexity
3. Modify the request payload structure to match Grok3's expected format (parameters like temperature, max_tokens, etc.)
4. Update response handling to properly parse and extract Grok3's response format
5. Implement proper error handling for Grok3-specific error codes and messages
6. Update environment variables and configuration files to include Grok3 API keys and endpoints
7. Ensure rate limiting and quota management are properly implemented according to Grok3's specifications
8. Update any UI components that display research provider information to show Grok3 instead of Perplexity
9. Maintain backward compatibility for any stored research results from Perplexity
10. Document the new API integration in the developer documentation
Grok3 API has different parameter requirements and response formats compared to Perplexity, so careful attention must be paid to these differences during implementation.
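As a starting point, here is a minimal sketch of the client module described in step 1, written in plain JavaScript to match the rest of this repo even though the step names a `.ts` file. The endpoint URL, environment variable names, default model, and response shape below are all assumptions to verify against Grok3's official API documentation:

```javascript
// Sketch only: Grok3 chat client. Endpoint, env var names, model name, and the
// OpenAI-style response shape are assumptions, not confirmed Grok3 specifics.
const GROK3_ENDPOINT = process.env.GROK3_ENDPOINT || 'https://api.x.ai/v1/chat/completions'; // assumed

async function grok3Chat({ systemPrompt, userPrompt, temperature, maxTokens }) {
  const response = await fetch(GROK3_ENDPOINT, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${process.env.GROK3_API_KEY}` // assumed env var name
    },
    body: JSON.stringify({
      model: process.env.GROK3_MODEL || 'grok-3', // assumed default model
      messages: [
        { role: 'system', content: systemPrompt },
        { role: 'user', content: userPrompt }
      ],
      temperature,
      max_tokens: maxTokens
    })
  });
  if (!response.ok) {
    throw new Error(`Grok3 API error: ${response.status} ${response.statusText}`);
  }
  const data = await response.json();
  return data.choices?.[0]?.message?.content; // assumed response shape
}
```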
# Test Strategy:
Testing should verify that the Grok3 API integration works correctly and maintains feature parity with the previous Perplexity implementation:
1. Unit tests:
- Test the Grok3 API client with mocked responses
- Verify proper error handling for various error scenarios (rate limits, authentication failures, etc.)
- Test the transformation of application requests to Grok3-compatible format
2. Integration tests:
- Perform actual API calls to Grok3 with test credentials
- Verify that research results are correctly parsed and returned
- Test with various types of research queries to ensure broad compatibility
3. End-to-end tests:
- Test the complete research flow from UI input to displayed results
- Verify that all existing research features work with the new API
4. Performance tests:
- Compare response times between Perplexity and Grok3
- Ensure the application handles any differences in response time appropriately
5. Regression tests:
- Verify that existing features dependent on research capabilities continue to work
- Test that stored research results from Perplexity are still accessible and displayed correctly
Create a test environment with both APIs available to compare results and ensure quality before fully replacing Perplexity with Grok3.
48
tasks/task_036.txt Normal file
View File
@@ -0,0 +1,48 @@
# Task ID: 36
# Title: Add Ollama Support for AI Services as Claude Alternative
# Status: pending
# Dependencies: None
# Priority: medium
# Description: Implement Ollama integration as an alternative to Claude for all main AI services, allowing users to run local language models instead of relying on cloud-based Claude API.
# Details:
This task involves creating a comprehensive Ollama integration that can replace Claude across all main AI services in the application. Implementation should include:
1. Create an OllamaService class that implements the same interface as the ClaudeService to ensure compatibility
2. Add configuration options to specify Ollama endpoint URL (default: http://localhost:11434)
3. Implement model selection functionality to allow users to choose which Ollama model to use (e.g., llama3, mistral, etc.)
4. Handle prompt formatting specific to Ollama models, ensuring proper system/user message separation
5. Implement proper error handling for cases where Ollama server is unavailable or returns errors
6. Add fallback mechanism to Claude when Ollama fails or isn't configured
7. Update the AI service factory to conditionally create either Claude or Ollama service based on configuration
8. Ensure token counting and rate limiting are appropriately handled for Ollama models
9. Add documentation for users explaining how to set up and use Ollama with the application
10. Optimize prompt templates specifically for Ollama models if needed
The implementation should be toggled through a configuration option (useOllama: true/false) and should maintain all existing functionality currently provided by Claude.
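For illustration, a minimal sketch of the non-streaming chat call an OllamaService might wrap. It targets Ollama's local /api/chat route; the default model and the exact response shape should still be verified against the Ollama version in use:

```javascript
// Sketch only: non-streaming chat call against a local Ollama server.
const OLLAMA_URL = process.env.OLLAMA_URL || 'http://localhost:11434';

async function ollamaChat({ model = 'llama3', systemPrompt, userPrompt }) {
  const response = await fetch(`${OLLAMA_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model,
      messages: [
        { role: 'system', content: systemPrompt },
        { role: 'user', content: userPrompt }
      ],
      stream: false
    })
  });
  if (!response.ok) {
    // The service factory can catch this and fall back to Claude (step 6).
    throw new Error(`Ollama server error: ${response.status} ${response.statusText}`);
  }
  const data = await response.json();
  return data.message?.content;
}
```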
# Test Strategy:
Testing should verify that Ollama integration works correctly as a drop-in replacement for Claude:
1. Unit tests:
- Test OllamaService class methods in isolation with mocked responses
- Verify proper error handling when Ollama server is unavailable
- Test fallback mechanism to Claude when configured
2. Integration tests:
- Test with actual Ollama server running locally with at least two different models
- Verify all AI service functions work correctly with Ollama
- Compare outputs between Claude and Ollama for quality assessment
3. Configuration tests:
- Verify toggling between Claude and Ollama works as expected
- Test with various model configurations
4. Performance tests:
- Measure and compare response times between Claude and Ollama
- Test with different load scenarios
5. Manual testing:
- Verify all main AI features work correctly with Ollama
- Test edge cases like very long inputs or specialized tasks
Create a test document comparing output quality between Claude and various Ollama models to help users understand the tradeoffs.
49
tasks/task_037.txt Normal file
View File
@@ -0,0 +1,49 @@
# Task ID: 37
# Title: Add Gemini Support for Main AI Services as Claude Alternative
# Status: pending
# Dependencies: None
# Priority: medium
# Description: Implement Google's Gemini API integration as an alternative to Claude for all main AI services, allowing users to switch between different LLM providers.
# Details:
This task involves integrating Google's Gemini API across all main AI services that currently use Claude:
1. Create a new GeminiService class that implements the same interface as the existing ClaudeService
2. Implement authentication and API key management for Gemini API
3. Map our internal prompt formats to Gemini's expected input format
4. Handle Gemini-specific parameters (temperature, top_p, etc.) and response parsing
5. Update the AI service factory/provider to support selecting Gemini as an alternative
6. Add configuration options in settings to allow users to select Gemini as their preferred provider
7. Implement proper error handling for Gemini-specific API errors
8. Ensure streaming responses are properly supported if Gemini offers this capability
9. Update documentation to reflect the new Gemini option
10. Consider implementing model selection if Gemini offers multiple models (e.g., Gemini Pro, Gemini Ultra)
11. Ensure all existing AI capabilities (summarization, code generation, etc.) maintain feature parity when using Gemini
The implementation should follow the same pattern as the recent Ollama integration (Task #36) to maintain consistency in how alternative AI providers are supported.
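Following that Ollama pattern, provider selection in the service factory (step 5) might look like the sketch below. The factory helpers and the aiProvider config key are hypothetical placeholders, not existing code:

```javascript
// Sketch only: provider selection in the AI service factory. The helper
// functions and the config key are hypothetical placeholders.
function createAIService(config) {
  switch (config.aiProvider) { // assumed key: 'claude' | 'ollama' | 'gemini'
    case 'gemini':
      return createGeminiService(config); // would wrap Google's Gemini API
    case 'ollama':
      return createOllamaService(config); // local models, see Task #36
    case 'claude':
    default:
      return createClaudeService(config); // current default provider
  }
}
```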
# Test Strategy:
Testing should verify Gemini integration works correctly across all AI services:
1. Unit tests:
- Test GeminiService class methods with mocked API responses
- Verify proper error handling for common API errors
- Test configuration and model selection functionality
2. Integration tests:
- Verify authentication and API connection with valid credentials
- Test each AI service with Gemini to ensure proper functionality
- Compare outputs between Claude and Gemini for the same inputs to verify quality
3. End-to-end tests:
- Test the complete user flow of switching to Gemini and using various AI features
- Verify streaming responses work correctly if supported
4. Performance tests:
- Measure and compare response times between Claude and Gemini
- Test with various input lengths to verify handling of context limits
5. Manual testing:
- Verify the quality of Gemini responses across different use cases
- Test edge cases like very long inputs or specialized domain knowledge
All tests should pass with Gemini selected as the provider, and the user experience should be consistent regardless of which provider is selected.
56
tasks/task_038.txt Normal file
View File
@@ -0,0 +1,56 @@
# Task ID: 38
# Title: Implement Version Check System with Upgrade Notifications
# Status: done
# Dependencies: None
# Priority: high
# Description: Create a system that checks for newer package versions and displays upgrade notifications when users run any command, informing them to update to the latest version.
# Details:
Implement a version check mechanism that runs automatically with every command execution:
1. Create a new module (e.g., `versionChecker.js`) that will:
- Fetch the latest version from npm registry using the npm registry API (https://registry.npmjs.org/task-master-ai/latest)
- Compare it with the current installed version (from package.json)
- Store the last check timestamp to avoid excessive API calls (check once per day)
- Cache the result to minimize network requests
2. The notification should:
- Use colored text (e.g., yellow background with black text) to be noticeable
- Include the current version and latest version
- Show the exact upgrade command: 'npm i task-master-ai@latest'
- Be displayed at the beginning or end of command output, not interrupting the main content
- Include a small separator line to distinguish it from command output
3. Implementation considerations:
- Handle network failures gracefully (don't block command execution if version check fails)
- Add a configuration option to disable update checks if needed
- Ensure the check is lightweight and doesn't significantly impact command performance
- Consider using a package like 'semver' for proper version comparison
   - Implement a cooldown period (e.g., only check once per day) to avoid excessive API calls (see the sketch below)
4. The version check should be integrated into the main command execution flow so it runs for all commands automatically.
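The checkForUpdate implementation added in this commit queries the registry on every run; the once-per-day cooldown from the considerations above could be layered on with a small timestamp cache. A sketch, assuming a cache file in the user's home directory and importing checkForUpdate from commands.js (both assumptions, not part of this commit):

```javascript
// Sketch only: once-per-day cooldown around checkForUpdate. The cache file
// location and JSON shape are assumptions.
import fs from 'fs';
import os from 'os';
import path from 'path';
import { checkForUpdate } from './scripts/modules/commands.js'; // path as used by the test scripts

const CACHE_FILE = path.join(os.homedir(), '.task-master-update-check.json'); // assumed location
const ONE_DAY_MS = 24 * 60 * 60 * 1000;

async function checkForUpdateWithCooldown() {
  try {
    const cache = JSON.parse(fs.readFileSync(CACHE_FILE, 'utf8'));
    if (Date.now() - cache.checkedAt < ONE_DAY_MS) {
      return cache.result; // within the cooldown window: reuse the cached answer
    }
  } catch {
    // Missing or corrupt cache: fall through to a fresh check
  }
  const result = await checkForUpdate();
  try {
    fs.writeFileSync(CACHE_FILE, JSON.stringify({ checkedAt: Date.now(), result }));
  } catch {
    // A failed cache write must never block the command itself
  }
  return result;
}
```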
# Test Strategy:
1. Manual testing:
- Install an older version of the package
- Run various commands and verify the update notification appears
- Update to the latest version and confirm the notification no longer appears
- Test with network disconnected to ensure graceful handling of failures
2. Unit tests:
- Mock the npm registry response to test different scenarios:
- When a newer version exists
- When using the latest version
- When the registry is unavailable
- Test the version comparison logic with various version strings
- Test the cooldown/caching mechanism works correctly
3. Integration tests:
- Create a test that runs a command and verifies the notification appears in the expected format
- Test that the notification appears for all commands
- Verify the notification doesn't interfere with normal command output
4. Edge cases to test:
- Pre-release versions (alpha/beta)
- Very old versions
- When package.json is missing or malformed
- When npm registry returns unexpected data
View File
@@ -0,0 +1,69 @@
import { checkForUpdate, displayUpgradeNotification, compareVersions } from './scripts/modules/commands.js';
import fs from 'fs';
import path from 'path';
// Force our current version for testing
process.env.FORCE_VERSION = '0.9.30';
// Create a mock package.json in memory for testing
const mockPackageJson = {
name: 'task-master-ai',
version: '0.9.30'
};
// Modified version of checkForUpdate that doesn't use HTTP for testing
async function testCheckForUpdate(simulatedLatestVersion) {
// Get current version - use our forced version
const currentVersion = process.env.FORCE_VERSION || '0.9.30';
console.log(`Using simulated current version: ${currentVersion}`);
console.log(`Using simulated latest version: ${simulatedLatestVersion}`);
// Compare versions
const needsUpdate = compareVersions(currentVersion, simulatedLatestVersion) < 0;
return {
currentVersion,
latestVersion: simulatedLatestVersion,
needsUpdate
};
}
// Test with current version older than latest (should show update notice)
async function runTest() {
console.log('=== Testing version check scenarios ===\n');
// Scenario 1: Update available
console.log('\n--- Scenario 1: Update available (Current: 0.9.30, Latest: 1.0.0) ---');
const updateInfo1 = await testCheckForUpdate('1.0.0');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo1.currentVersion}`);
console.log(`- Latest version: ${updateInfo1.latestVersion}`);
console.log(`- Update needed: ${updateInfo1.needsUpdate}`);
if (updateInfo1.needsUpdate) {
console.log('\nDisplaying upgrade notification:');
displayUpgradeNotification(updateInfo1.currentVersion, updateInfo1.latestVersion);
}
// Scenario 2: No update needed (versions equal)
console.log('\n--- Scenario 2: No update needed (Current: 0.9.30, Latest: 0.9.30) ---');
const updateInfo2 = await testCheckForUpdate('0.9.30');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo2.currentVersion}`);
console.log(`- Latest version: ${updateInfo2.latestVersion}`);
console.log(`- Update needed: ${updateInfo2.needsUpdate}`);
// Scenario 3: Development version (current newer than latest)
console.log('\n--- Scenario 3: Development version (Current: 0.9.30, Latest: 0.9.0) ---');
const updateInfo3 = await testCheckForUpdate('0.9.0');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo3.currentVersion}`);
console.log(`- Latest version: ${updateInfo3.latestVersion}`);
console.log(`- Update needed: ${updateInfo3.needsUpdate}`);
console.log('\n=== Test complete ===');
}
// Run all tests
runTest();
22
test-version-check.js Normal file
View File
@@ -0,0 +1,22 @@
import { displayUpgradeNotification, compareVersions } from './scripts/modules/commands.js';
// Simulate different version scenarios
console.log('=== Simulating version check ===\n');
// 1. Current version is older than latest (should show update notice)
console.log('Scenario 1: Current version older than latest');
displayUpgradeNotification('0.9.30', '1.0.0');
// 2. Current version same as latest (no update needed)
console.log('\nScenario 2: Current version same as latest (this would not normally show a notice)');
console.log('Current: 1.0.0, Latest: 1.0.0');
console.log('compareVersions result:', compareVersions('1.0.0', '1.0.0'));
console.log('Update needed:', compareVersions('1.0.0', '1.0.0') < 0 ? 'Yes' : 'No');
// 3. Current version newer than latest (e.g., development version, would not show notice)
console.log('\nScenario 3: Current version newer than latest (this would not normally show a notice)');
console.log('Current: 1.1.0, Latest: 1.0.0');
console.log('compareVersions result:', compareVersions('1.1.0', '1.0.0'));
console.log('Update needed:', compareVersions('1.1.0', '1.0.0') < 0 ? 'Yes' : 'No');
console.log('\n=== Test complete ===');
View File
@@ -6,6 +6,11 @@ import { jest } from '@jest/globals';
// Mock functions that need jest.fn methods
const mockParsePRD = jest.fn().mockResolvedValue(undefined);
const mockUpdateTaskById = jest.fn().mockResolvedValue({
id: 2,
title: 'Updated Task',
description: 'Updated description'
});
const mockDisplayBanner = jest.fn();
const mockDisplayHelp = jest.fn();
const mockLog = jest.fn();
@@ -37,7 +42,8 @@ jest.mock('../../scripts/modules/ui.js', () => ({
}));
jest.mock('../../scripts/modules/task-manager.js', () => ({
parsePRD: mockParsePRD
parsePRD: mockParsePRD,
updateTaskById: mockUpdateTaskById
}));
// Add this function before the mock of utils.js
@@ -286,4 +292,293 @@ describe('Commands Module', () => {
expect(mockParsePRD).toHaveBeenCalledWith(testFile, outputFile, numTasks);
});
});
describe('updateTask command', () => {
// Since mocking Commander is complex, we'll test the action handler directly
// Recreate the action handler logic based on commands.js
async function updateTaskAction(options) {
try {
const tasksPath = options.file;
// Validate required parameters
if (!options.id) {
console.error(chalk.red('Error: --id parameter is required'));
console.log(chalk.yellow('Usage example: task-master update-task --id=23 --prompt="Update with new information"'));
process.exit(1);
return; // Add early return to prevent calling updateTaskById
}
// Parse the task ID and validate it's a number
const taskId = parseInt(options.id, 10);
if (isNaN(taskId) || taskId <= 0) {
console.error(chalk.red(`Error: Invalid task ID: ${options.id}. Task ID must be a positive integer.`));
console.log(chalk.yellow('Usage example: task-master update-task --id=23 --prompt="Update with new information"'));
process.exit(1);
return; // Add early return to prevent calling updateTaskById
}
if (!options.prompt) {
console.error(chalk.red('Error: --prompt parameter is required. Please provide information about the changes.'));
console.log(chalk.yellow('Usage example: task-master update-task --id=23 --prompt="Update with new information"'));
process.exit(1);
return; // Add early return to prevent calling updateTaskById
}
const prompt = options.prompt;
const useResearch = options.research || false;
// Validate tasks file exists
if (!fs.existsSync(tasksPath)) {
console.error(chalk.red(`Error: Tasks file not found at path: ${tasksPath}`));
if (tasksPath === 'tasks/tasks.json') {
console.log(chalk.yellow('Hint: Run task-master init or task-master parse-prd to create tasks.json first'));
} else {
console.log(chalk.yellow(`Hint: Check if the file path is correct: ${tasksPath}`));
}
process.exit(1);
return; // Add early return to prevent calling updateTaskById
}
console.log(chalk.blue(`Updating task ${taskId} with prompt: "${prompt}"`));
console.log(chalk.blue(`Tasks file: ${tasksPath}`));
if (useResearch) {
// Verify Perplexity API key exists if using research
if (!process.env.PERPLEXITY_API_KEY) {
console.log(chalk.yellow('Warning: PERPLEXITY_API_KEY environment variable is missing. Research-backed updates will not be available.'));
console.log(chalk.yellow('Falling back to Claude AI for task update.'));
} else {
console.log(chalk.blue('Using Perplexity AI for research-backed task update'));
}
}
const result = await mockUpdateTaskById(tasksPath, taskId, prompt, useResearch);
// If the task wasn't updated (e.g., if it was already marked as done)
if (!result) {
console.log(chalk.yellow('\nTask update was not completed. Review the messages above for details.'));
}
} catch (error) {
console.error(chalk.red(`Error: ${error.message}`));
// Provide more helpful error messages for common issues
if (error.message.includes('task') && error.message.includes('not found')) {
console.log(chalk.yellow('\nTo fix this issue:'));
console.log(' 1. Run task-master list to see all available task IDs');
console.log(' 2. Use a valid task ID with the --id parameter');
} else if (error.message.includes('API key')) {
console.log(chalk.yellow('\nThis error is related to API keys. Check your environment variables.'));
}
if (true) { // CONFIG.debug
console.error(error);
}
process.exit(1);
}
}
beforeEach(() => {
// Reset all mocks
jest.clearAllMocks();
// Set up spy for existsSync (already mocked in the outer scope)
mockExistsSync.mockReturnValue(true);
});
test('should validate required parameters - missing ID', async () => {
// Set up the command options without ID
const options = {
file: 'test-tasks.json',
prompt: 'Update the task'
};
// Call the action directly
await updateTaskAction(options);
// Verify validation error
expect(mockConsoleError).toHaveBeenCalledWith(expect.stringContaining('--id parameter is required'));
expect(mockExit).toHaveBeenCalledWith(1);
expect(mockUpdateTaskById).not.toHaveBeenCalled();
});
test('should validate required parameters - invalid ID', async () => {
// Set up the command options with invalid ID
const options = {
file: 'test-tasks.json',
id: 'not-a-number',
prompt: 'Update the task'
};
// Call the action directly
await updateTaskAction(options);
// Verify validation error
expect(mockConsoleError).toHaveBeenCalledWith(expect.stringContaining('Invalid task ID'));
expect(mockExit).toHaveBeenCalledWith(1);
expect(mockUpdateTaskById).not.toHaveBeenCalled();
});
test('should validate required parameters - missing prompt', async () => {
// Set up the command options without prompt
const options = {
file: 'test-tasks.json',
id: '2'
};
// Call the action directly
await updateTaskAction(options);
// Verify validation error
expect(mockConsoleError).toHaveBeenCalledWith(expect.stringContaining('--prompt parameter is required'));
expect(mockExit).toHaveBeenCalledWith(1);
expect(mockUpdateTaskById).not.toHaveBeenCalled();
});
test('should validate tasks file exists', async () => {
// Mock file not existing
mockExistsSync.mockReturnValue(false);
// Set up the command options
const options = {
file: 'missing-tasks.json',
id: '2',
prompt: 'Update the task'
};
// Call the action directly
await updateTaskAction(options);
// Verify validation error
expect(mockConsoleError).toHaveBeenCalledWith(expect.stringContaining('Tasks file not found'));
expect(mockExit).toHaveBeenCalledWith(1);
expect(mockUpdateTaskById).not.toHaveBeenCalled();
});
test('should call updateTaskById with correct parameters', async () => {
// Set up the command options
const options = {
file: 'test-tasks.json',
id: '2',
prompt: 'Update the task',
research: true
};
// Mock perplexity API key
process.env.PERPLEXITY_API_KEY = 'dummy-key';
// Call the action directly
await updateTaskAction(options);
// Verify updateTaskById was called with correct parameters
expect(mockUpdateTaskById).toHaveBeenCalledWith(
'test-tasks.json',
2,
'Update the task',
true
);
// Verify console output
expect(mockConsoleLog).toHaveBeenCalledWith(expect.stringContaining('Updating task 2'));
expect(mockConsoleLog).toHaveBeenCalledWith(expect.stringContaining('Using Perplexity AI'));
// Clean up
delete process.env.PERPLEXITY_API_KEY;
});
test('should handle null result from updateTaskById', async () => {
// Mock updateTaskById returning null (e.g., task already completed)
mockUpdateTaskById.mockResolvedValueOnce(null);
// Set up the command options
const options = {
file: 'test-tasks.json',
id: '2',
prompt: 'Update the task'
};
// Call the action directly
await updateTaskAction(options);
// Verify updateTaskById was called
expect(mockUpdateTaskById).toHaveBeenCalled();
// Verify console output for null result
expect(mockConsoleLog).toHaveBeenCalledWith(expect.stringContaining('Task update was not completed'));
});
test('should handle errors from updateTaskById', async () => {
// Mock updateTaskById throwing an error
mockUpdateTaskById.mockRejectedValueOnce(new Error('Task update failed'));
// Set up the command options
const options = {
file: 'test-tasks.json',
id: '2',
prompt: 'Update the task'
};
// Call the action directly
await updateTaskAction(options);
// Verify error handling
expect(mockConsoleError).toHaveBeenCalledWith(expect.stringContaining('Error: Task update failed'));
expect(mockExit).toHaveBeenCalledWith(1);
});
});
});
// Test the version comparison utility
describe('Version comparison', () => {
// Use a dynamic import for the commands module
let compareVersions;
beforeAll(async () => {
// Import the function we want to test dynamically
const commandsModule = await import('../../scripts/modules/commands.js');
compareVersions = commandsModule.compareVersions;
});
test('compareVersions correctly compares semantic versions', () => {
expect(compareVersions('1.0.0', '1.0.0')).toBe(0);
expect(compareVersions('1.0.0', '1.0.1')).toBe(-1);
expect(compareVersions('1.0.1', '1.0.0')).toBe(1);
expect(compareVersions('1.0.0', '1.1.0')).toBe(-1);
expect(compareVersions('1.1.0', '1.0.0')).toBe(1);
expect(compareVersions('1.0.0', '2.0.0')).toBe(-1);
expect(compareVersions('2.0.0', '1.0.0')).toBe(1);
expect(compareVersions('1.0', '1.0.0')).toBe(0);
expect(compareVersions('1.0.0.0', '1.0.0')).toBe(0);
expect(compareVersions('1.0.0', '1.0.0.1')).toBe(-1);
});
});
// Test the update check functionality
describe('Update check', () => {
let displayUpgradeNotification;
let consoleLogSpy;
beforeAll(async () => {
// Import the function we want to test dynamically
const commandsModule = await import('../../scripts/modules/commands.js');
displayUpgradeNotification = commandsModule.displayUpgradeNotification;
});
beforeEach(() => {
// Spy on console.log
consoleLogSpy = jest.spyOn(console, 'log').mockImplementation(() => {});
});
afterEach(() => {
consoleLogSpy.mockRestore();
});
test('displays upgrade notification when newer version is available', () => {
// Test displayUpgradeNotification function
displayUpgradeNotification('1.0.0', '1.1.0');
expect(consoleLogSpy).toHaveBeenCalled();
expect(consoleLogSpy.mock.calls[0][0]).toContain('Update Available!');
expect(consoleLogSpy.mock.calls[0][0]).toContain('1.0.0');
expect(consoleLogSpy.mock.calls[0][0]).toContain('1.1.0');
});
});
View File
@@ -143,4 +143,255 @@ describe('Windsurf Rules File Handling', () => {
expect.any(String)
);
});
});
// New test suite for MCP Configuration Handling
describe('MCP Configuration Handling', () => {
let tempDir;
beforeEach(() => {
jest.clearAllMocks();
// Create a temporary directory for testing
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'task-master-test-'));
// Spy on fs methods
jest.spyOn(fs, 'writeFileSync').mockImplementation(() => {});
jest.spyOn(fs, 'readFileSync').mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return JSON.stringify({
"mcpServers": {
"existing-server": {
"command": "node",
"args": ["server.js"]
}
}
});
}
return '{}';
});
jest.spyOn(fs, 'existsSync').mockImplementation((filePath) => {
// Return true for specific paths to test different scenarios
if (filePath.toString().includes('package.json')) {
return true;
}
// Default to false for other paths
return false;
});
jest.spyOn(fs, 'mkdirSync').mockImplementation(() => {});
jest.spyOn(fs, 'copyFileSync').mockImplementation(() => {});
});
afterEach(() => {
// Clean up the temporary directory
try {
fs.rmSync(tempDir, { recursive: true, force: true });
} catch (err) {
console.error(`Error cleaning up: ${err.message}`);
}
});
// Test function that simulates the behavior of setupMCPConfiguration
function mockSetupMCPConfiguration(targetDir, projectName) {
const mcpDirPath = path.join(targetDir, '.cursor');
const mcpJsonPath = path.join(mcpDirPath, 'mcp.json');
// Create .cursor directory if it doesn't exist
if (!fs.existsSync(mcpDirPath)) {
fs.mkdirSync(mcpDirPath, { recursive: true });
}
// New MCP config to be added - references the installed package
const newMCPServer = {
"task-master-ai": {
"command": "npx",
"args": [
"task-master-ai",
"mcp-server"
]
}
};
// Check if mcp.json already exists
if (fs.existsSync(mcpJsonPath)) {
try {
// Read existing config
const mcpConfig = JSON.parse(fs.readFileSync(mcpJsonPath, 'utf8'));
// Initialize mcpServers if it doesn't exist
if (!mcpConfig.mcpServers) {
mcpConfig.mcpServers = {};
}
// Add the taskmaster-ai server if it doesn't exist
if (!mcpConfig.mcpServers["task-master-ai"]) {
mcpConfig.mcpServers["task-master-ai"] = newMCPServer["task-master-ai"];
}
// Write the updated configuration
fs.writeFileSync(
mcpJsonPath,
JSON.stringify(mcpConfig, null, 4)
);
} catch (error) {
// Create new configuration on error
const newMCPConfig = {
"mcpServers": newMCPServer
};
fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
}
} else {
// If mcp.json doesn't exist, create it
const newMCPConfig = {
"mcpServers": newMCPServer
};
fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
}
}
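// For reference, a fresh run of the helper above should write a
// .cursor/mcp.json shaped like this (4-space indentation from JSON.stringify):
// {
//     "mcpServers": {
//         "task-master-ai": {
//             "command": "npx",
//             "args": ["task-master-ai", "mcp-server"]
//         }
//     }
// }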
test('creates mcp.json when it does not exist', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('task-master-ai')
);
// Should create a proper structure with mcpServers key
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('mcpServers')
);
// Should reference npx command
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('npx')
);
});
test('updates existing mcp.json by adding new server', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Override the existsSync mock to simulate mcp.json exists
fs.existsSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return true;
}
return false;
});
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
// Should preserve existing server
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('existing-server')
);
// Should add our new server
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('task-master-ai')
);
});
test('handles JSON parsing errors by creating new mcp.json', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Override existsSync to say mcp.json exists
fs.existsSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return true;
}
return false;
});
// But make readFileSync return invalid JSON
fs.readFileSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return '{invalid json';
}
return '{}';
});
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
// Should create a new valid JSON file with our server
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('task-master-ai')
);
});
test('does not modify existing server configuration if it already exists', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Override existsSync to say mcp.json exists
fs.existsSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return true;
}
return false;
});
// Return JSON that already has task-master-ai
fs.readFileSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return JSON.stringify({
"mcpServers": {
"existing-server": {
"command": "node",
"args": ["server.js"]
},
"task-master-ai": {
"command": "custom",
"args": ["custom-args"]
}
}
});
}
return '{}';
});
// Spy to check what's written
const writeFileSyncSpy = jest.spyOn(fs, 'writeFileSync');
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
// Verify the written data preserves the original task-master-ai configuration
const dataWritten = JSON.parse(writeFileSyncSpy.mock.calls[0][1]);
expect(dataWritten.mcpServers["task-master-ai"].command).toBe("custom");
expect(dataWritten.mcpServers["task-master-ai"].args).toContain("custom-args");
});
test('creates the .cursor directory if it doesn\'t exist', () => {
// Arrange
const cursorDirPath = path.join(tempDir, '.cursor');
// Make sure it looks like the directory doesn't exist
fs.existsSync.mockReturnValue(false);
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
expect(fs.mkdirSync).toHaveBeenCalledWith(cursorDirPath, { recursive: true });
});
});

View File

@@ -22,6 +22,8 @@ const mockValidateAndFixDependencies = jest.fn();
const mockReadJSON = jest.fn();
const mockLog = jest.fn();
const mockIsTaskDependentOn = jest.fn().mockReturnValue(false);
const mockCreate = jest.fn(); // Mock for Anthropic messages.create
const mockChatCompletionsCreate = jest.fn(); // Mock for Perplexity chat.completions.create
// Mock fs module
jest.mock('fs', () => ({
@@ -63,6 +65,30 @@ jest.mock('../../scripts/modules/ai-services.js', () => ({
callPerplexity: mockCallPerplexity
}));
// Mock Anthropic SDK
jest.mock('@anthropic-ai/sdk', () => {
return {
Anthropic: jest.fn().mockImplementation(() => ({
messages: {
create: mockCreate
}
}))
};
});
// Mock the OpenAI SDK, which is used as the Perplexity client
jest.mock('openai', () => {
return {
default: jest.fn().mockImplementation(() => ({
chat: {
completions: {
create: mockChatCompletionsCreate
}
}
}))
};
});
// Mock the task-manager module itself to control what gets imported
jest.mock('../../scripts/modules/task-manager.js', () => {
// Get the original module to preserve function implementations
@@ -227,7 +253,7 @@ import { sampleClaudeResponse } from '../fixtures/sample-claude-response.js';
import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';
// Destructure the required functions for convenience
const { findNextTask, generateTaskFiles, clearSubtasks } = taskManager;
const { findNextTask, generateTaskFiles, clearSubtasks, updateTaskById } = taskManager;
describe('Task Manager Module', () => {
beforeEach(() => {
@@ -1697,4 +1723,294 @@ const testRemoveSubtask = (tasksPath, subtaskId, convertToTask = false, generate
}
return convertedTask;
};
};
describe.skip('updateTaskById function', () => {
let mockConsoleLog;
let mockConsoleError;
let mockProcess;
beforeEach(() => {
// Reset all mocks
jest.clearAllMocks();
// Set up default mock values
mockExistsSync.mockReturnValue(true);
mockWriteJSON.mockImplementation(() => {});
mockGenerateTaskFiles.mockResolvedValue(undefined);
// Create a deep copy of the imported sample tasks for each test (ES module import, not require)
const sampleTasksDeepCopy = JSON.parse(JSON.stringify(sampleTasks));
mockReadJSON.mockReturnValue(sampleTasksDeepCopy);
// Mock console and process.exit
mockConsoleLog = jest.spyOn(console, 'log').mockImplementation(() => {});
mockConsoleError = jest.spyOn(console, 'error').mockImplementation(() => {});
mockProcess = jest.spyOn(process, 'exit').mockImplementation(() => {});
});
afterEach(() => {
// Restore console and process.exit
mockConsoleLog.mockRestore();
mockConsoleError.mockRestore();
mockProcess.mockRestore();
});
test('should update a task successfully', async () => {
// Expected task shape once the streamed Anthropic response is assembled
const mockTask = {
id: 2,
title: "Updated Core Functionality",
description: "Updated description",
status: "in-progress",
dependencies: [1],
priority: "high",
details: "Updated details",
testStrategy: "Updated test strategy"
};
// Mock streaming for successful response
const mockStream = {
[Symbol.asyncIterator]: jest.fn().mockImplementation(() => {
return {
next: jest.fn()
.mockResolvedValueOnce({
done: false,
value: {
type: 'content_block_delta',
delta: { text: '{"id": 2, "title": "Updated Core Functionality",' }
}
})
.mockResolvedValueOnce({
done: false,
value: {
type: 'content_block_delta',
delta: { text: '"description": "Updated description", "status": "in-progress",' }
}
})
.mockResolvedValueOnce({
done: false,
value: {
type: 'content_block_delta',
delta: { text: '"dependencies": [1], "priority": "high", "details": "Updated details",' }
}
})
.mockResolvedValueOnce({
done: false,
value: {
type: 'content_block_delta',
delta: { text: '"testStrategy": "Updated test strategy"}' }
}
})
.mockResolvedValueOnce({ done: true })
};
})
};
mockCreate.mockResolvedValue(mockStream);
// Call the function
const result = await updateTaskById('test-tasks.json', 2, 'Update task 2 with new information');
// Verify the task was updated
expect(result).toBeDefined();
expect(result.title).toBe("Updated Core Functionality");
expect(result.description).toBe("Updated description");
// Verify the correct functions were called
expect(mockReadJSON).toHaveBeenCalledWith('test-tasks.json');
expect(mockCreate).toHaveBeenCalled();
expect(mockWriteJSON).toHaveBeenCalled();
expect(mockGenerateTaskFiles).toHaveBeenCalled();
// Verify the task was updated in the tasks data
const tasksData = mockWriteJSON.mock.calls[0][1];
const updatedTask = tasksData.tasks.find(task => task.id === 2);
expect(updatedTask).toEqual(mockTask);
});
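// Illustrative helper (an assumption, not part of the commit): the hand-rolled
// async iterators in these tests could be generated from an array of text
// chunks, e.g. mockCreate.mockResolvedValue(makeMockStream(['{"id": 2,', '...']));
function makeMockStream(chunks) {
  return {
    async *[Symbol.asyncIterator]() {
      for (const text of chunks) {
        yield { type: 'content_block_delta', delta: { text } };
      }
    }
  };
}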
test('should return null when task is already completed', async () => {
// Call the function with a completed task
const result = await updateTaskById('test-tasks.json', 1, 'Update task 1 with new information');
// Verify the result is null
expect(result).toBeNull();
// Verify the correct functions were called
expect(mockReadJSON).toHaveBeenCalledWith('test-tasks.json');
expect(mockCreate).not.toHaveBeenCalled();
expect(mockWriteJSON).not.toHaveBeenCalled();
expect(mockGenerateTaskFiles).not.toHaveBeenCalled();
});
test('should handle task not found error', async () => {
// Call the function with a non-existent task
const result = await updateTaskById('test-tasks.json', 999, 'Update non-existent task');
// Verify the result is null
expect(result).toBeNull();
// Verify the error was logged
expect(mockLog).toHaveBeenCalledWith('error', expect.stringContaining('Task with ID 999 not found'));
expect(mockConsoleError).toHaveBeenCalledWith(expect.stringContaining('Task with ID 999 not found'));
// Verify the correct functions were called
expect(mockReadJSON).toHaveBeenCalledWith('test-tasks.json');
expect(mockCreate).not.toHaveBeenCalled();
expect(mockWriteJSON).not.toHaveBeenCalled();
expect(mockGenerateTaskFiles).not.toHaveBeenCalled();
});
test('should preserve completed subtasks', async () => {
// Modify the sample data to have a task with completed subtasks
const tasksData = mockReadJSON();
const task = tasksData.tasks.find(t => t.id === 3);
if (task && task.subtasks && task.subtasks.length > 0) {
// Mark the first subtask as completed
task.subtasks[0].status = 'done';
task.subtasks[0].title = 'Completed Header Component';
mockReadJSON.mockReturnValue(tasksData);
}
// Mock a response that tries to modify the completed subtask
const mockStream = {
[Symbol.asyncIterator]: jest.fn().mockImplementation(() => {
return {
next: jest.fn()
.mockResolvedValueOnce({
done: false,
value: {
type: 'content_block_delta',
delta: { text: '{"id": 3, "title": "Updated UI Components",' }
}
})
.mockResolvedValueOnce({
done: false,
value: {
type: 'content_block_delta',
delta: { text: '"description": "Updated description", "status": "pending",' }
}
})
.mockResolvedValueOnce({
done: false,
value: {
type: 'content_block_delta',
delta: { text: '"dependencies": [2], "priority": "medium", "subtasks": [' }
}
})
.mockResolvedValueOnce({
done: false,
value: {
type: 'content_block_delta',
delta: { text: '{"id": 1, "title": "Modified Header Component", "status": "pending"},' }
}
})
.mockResolvedValueOnce({
done: false,
value: {
type: 'content_block_delta',
delta: { text: '{"id": 2, "title": "Create Footer Component", "status": "pending"}]}' }
}
})
.mockResolvedValueOnce({ done: true })
};
})
};
mockCreate.mockResolvedValue(mockStream);
// Call the function
const result = await updateTaskById('test-tasks.json', 3, 'Update UI components task');
// Verify the subtasks were preserved
expect(result).toBeDefined();
expect(result.subtasks[0].title).toBe('Completed Header Component');
expect(result.subtasks[0].status).toBe('done');
// Verify the correct functions were called
expect(mockReadJSON).toHaveBeenCalledWith('test-tasks.json');
expect(mockCreate).toHaveBeenCalled();
expect(mockWriteJSON).toHaveBeenCalled();
expect(mockGenerateTaskFiles).toHaveBeenCalled();
});
test('should handle missing tasks file', async () => {
// Mock file not existing
mockExistsSync.mockReturnValue(false);
// Call the function
const result = await updateTaskById('missing-tasks.json', 2, 'Update task');
// Verify the result is null
expect(result).toBeNull();
// Verify the error was logged
expect(mockLog).toHaveBeenCalledWith('error', expect.stringContaining('Tasks file not found'));
expect(mockConsoleError).toHaveBeenCalledWith(expect.stringContaining('Tasks file not found'));
// Verify the correct functions were called
expect(mockReadJSON).not.toHaveBeenCalled();
expect(mockCreate).not.toHaveBeenCalled();
expect(mockWriteJSON).not.toHaveBeenCalled();
expect(mockGenerateTaskFiles).not.toHaveBeenCalled();
});
test('should handle API errors', async () => {
// Mock API error
mockCreate.mockRejectedValue(new Error('API error'));
// Call the function
const result = await updateTaskById('test-tasks.json', 2, 'Update task');
// Verify the result is null
expect(result).toBeNull();
// Verify the error was logged
expect(mockLog).toHaveBeenCalledWith('error', expect.stringContaining('API error'));
expect(mockConsoleError).toHaveBeenCalledWith(expect.stringContaining('API error'));
// Verify the correct functions were called
expect(mockReadJSON).toHaveBeenCalledWith('test-tasks.json');
expect(mockCreate).toHaveBeenCalled();
expect(mockWriteJSON).not.toHaveBeenCalled(); // Should not write on error
expect(mockGenerateTaskFiles).not.toHaveBeenCalled(); // Should not generate on error
});
test('should use Perplexity AI when research flag is true', async () => {
// Mock Perplexity API response
const mockPerplexityResponse = {
choices: [
{
message: {
content: '{"id": 2, "title": "Researched Core Functionality", "description": "Research-backed description", "status": "in-progress", "dependencies": [1], "priority": "high", "details": "Research-backed details", "testStrategy": "Research-backed test strategy"}'
}
}
]
};
mockChatCompletionsCreate.mockResolvedValue(mockPerplexityResponse);
// Set the Perplexity API key in environment
process.env.PERPLEXITY_API_KEY = 'dummy-key';
// Call the function with research flag
const result = await updateTaskById('test-tasks.json', 2, 'Update task with research', true);
// Verify the task was updated with research-backed information
expect(result).toBeDefined();
expect(result.title).toBe("Researched Core Functionality");
expect(result.description).toBe("Research-backed description");
// Verify the Perplexity API was called
expect(mockChatCompletionsCreate).toHaveBeenCalled();
expect(mockCreate).not.toHaveBeenCalled(); // Claude should not be called
// Verify the correct functions were called
expect(mockReadJSON).toHaveBeenCalledWith('test-tasks.json');
expect(mockWriteJSON).toHaveBeenCalled();
expect(mockGenerateTaskFiles).toHaveBeenCalled();
// Clean up
delete process.env.PERPLEXITY_API_KEY;
});
});
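// Illustrative note (an assumption about the code under test, not part of the
// commit): the streaming mocks above presume updateTaskById concatenates each
// content_block_delta's text and JSON-parses the result, roughly:
//
//   let fullResponse = '';
//   for await (const chunk of stream) {
//     if (chunk.type === 'content_block_delta') fullResponse += chunk.delta.text;
//   }
//   const updatedTask = JSON.parse(fullResponse);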