Compare commits


8 Commits

Author | SHA1 | Message | Date

Ralph Khreish | b2ac3a4ef4 | chore: rename CI | 2025-04-08 12:31:22 +02:00
Ralph Khreish | b2396fd8fe | chore: add prettier package | 2025-04-08 12:27:25 +02:00
Ralph Khreish | a99b2b20b3 | chore: add CI for prettier | 2025-04-08 12:23:46 +02:00
Ralph Khreish | 4136ef5679 | chore: add prettier config and prettify | 2025-04-08 12:22:21 +02:00
Ralph Khreish | a56a3628b3 | CHORE: Add CI for making sure PRs don't break things (#89) | 2025-04-03 16:01:58 +02:00
    * fix: add CI for better control of regressions during PRs
    * fix: slight readme improvement
    * chore: fix CI
    * cleanup
    * fix: duplicate workflow trigger
Ralph Khreish | 9dc5e75760 | Revert "Update analyze-complexity with realtime feedback and enhanced complex…" | 2025-04-02 19:28:01 +02:00
    This reverts commit 16f4d4b932.
Joe Danziger | 16f4d4b932 | Update analyze-complexity with realtime feedback and enhanced complexity report (#70) | 2025-04-02 01:57:19 +02:00
    * Update analyze-complexity with realtime feedback
    * PR fixes
    * include changeset
Ralph Khreish | 7fef5ab488 | fix: github actions (#82) | 2025-04-02 01:53:29 +02:00
64 changed files with 28012 additions and 24907 deletions

View File

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---
Add CI for testing

View File

@@ -1,10 +1,8 @@
{
"mcpServers": {
"taskmaster-ai": {
"command": "node",
"args": [
"./mcp-server/server.js"
]
}
}
}
"mcpServers": {
"taskmaster-ai": {
"command": "node",
"args": ["./mcp-server/server.js"]
}
}
}

.github/workflows/ci.yml (new file, vendored, 95 lines)
View File

@@ -0,0 +1,95 @@
name: CI
on:
push:
branches:
- main
- next
pull_request:
branches:
- main
- next
permissions:
contents: read
jobs:
setup:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-node@v4
with:
node-version: 20
cache: 'npm'
- name: Install Dependencies
id: install
run: npm ci
timeout-minutes: 2
- name: Cache node_modules
uses: actions/cache@v4
with:
path: node_modules
key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
format-check:
needs: setup
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 20
- name: Restore node_modules
uses: actions/cache@v4
with:
path: node_modules
key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
- name: Format Check
run: npm run format-check
env:
FORCE_COLOR: 1
test:
needs: setup
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 20
- name: Restore node_modules
uses: actions/cache@v4
with:
path: node_modules
key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
- name: Run Tests
run: |
npm run test:coverage -- --coverageThreshold '{"global":{"branches":0,"functions":0,"lines":0,"statements":0}}' --detectOpenHandles --forceExit
env:
NODE_ENV: test
CI: true
FORCE_COLOR: 1
timeout-minutes: 10
- name: Upload Test Results
if: always()
uses: actions/upload-artifact@v4
with:
name: test-results
path: |
test-results
coverage
junit.xml
retention-days: 30
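
For reference, a minimal sketch of a test file the Run Tests step above would pick up, assuming the `'**/tests/*.test.js'` pattern from the jest.config.js change later in this comparison; the file name and assertions are illustrative only, not part of the changeset.

```javascript
// tests/example.test.js — illustrative only; matches the '**/tests/*.test.js'
// glob from jest.config.js elsewhere in this diff, so `npm run test:coverage`
// in the CI job above would execute it.
describe('example suite', () => {
	it('passes under the zeroed coverage thresholds used in CI', () => {
		expect(1 + 1).toBe(2);
	});
});
```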

View File

@@ -14,9 +14,21 @@ jobs:
- uses: actions/setup-node@v4
with:
node-version: 20
cache: 'npm'
- name: Cache node_modules
uses: actions/cache@v4
with:
path: |
node_modules
*/*/node_modules
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Install Dependencies
run: npm install
run: npm ci
timeout-minutes: 2
- name: Create Release Pull Request or Publish to npm
uses: changesets/action@v1

.prettierignore (new file, 6 lines)
View File

@@ -0,0 +1,6 @@
# Ignore artifacts:
build
coverage
.changeset
tasks
package-lock.json

.prettierrc (new file, 11 lines)
View File

@@ -0,0 +1,11 @@
{
"printWidth": 80,
"tabWidth": 2,
"useTabs": true,
"semi": true,
"singleQuote": true,
"trailingComma": "none",
"bracketSpacing": true,
"arrowParens": "always",
"endOfLine": "lf"
}
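
For reference, a small illustration of the formatting these options produce: tabs for indentation, single quotes, semicolons, parentheses around single arrow parameters, and no trailing commas. The snippet itself is made up, not part of the changeset.

```javascript
// Illustrative only: output shape implied by the .prettierrc above
// (useTabs, singleQuote, semi, trailingComma: 'none', arrowParens: 'always').
const greet = (name) => {
	return 'Hello, ' + name;
};

const settings = {
	retries: 3,
	verbose: true
};
```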

View File

@@ -1,5 +1,9 @@
# Task Master
[![CI](https://github.com/eyaltoledano/claude-task-master/actions/workflows/ci.yml/badge.svg)](https://github.com/eyaltoledano/claude-task-master/actions/workflows/ci.yml)
[![License: MIT with Commons Clause](https://img.shields.io/badge/license-MIT%20with%20Commons%20Clause-blue.svg)](LICENSE)
[![npm version](https://badge.fury.io/js/task-master-ai.svg)](https://badge.fury.io/js/task-master-ai)
### by [@eyaltoledano](https://x.com/eyaltoledano)
A task management system for AI-driven development with Claude, designed to work seamlessly with Cursor AI.

View File

@@ -21,9 +21,11 @@ In an AI-driven development process—particularly with tools like [Cursor](http
The script can be configured through environment variables in a `.env` file at the root of the project:
### Required Configuration
- `ANTHROPIC_API_KEY`: Your Anthropic API key for Claude
### Optional Configuration
- `MODEL`: Specify which Claude model to use (default: "claude-3-7-sonnet-20250219")
- `MAX_TOKENS`: Maximum tokens for model responses (default: 4000)
- `TEMPERATURE`: Temperature for model responses (default: 0.7)
@@ -38,9 +40,10 @@ The script can be configured through environment variables in a `.env` file at t
## How It Works
1. **`tasks.json`**:
- A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
- The `meta` field can store additional info like the project's name, version, or reference to the PRD.
1. **`tasks.json`**:
- A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
- The `meta` field can store additional info like the project's name, version, or reference to the PRD.
- Tasks can have `subtasks` for more detailed implementation steps.
- Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.
@@ -50,7 +53,7 @@ The script can be configured through environment variables in a `.env` file at t
```bash
# If installed globally
task-master [command] [options]
# If using locally within the project
node scripts/dev.js [command] [options]
```
@@ -111,6 +114,7 @@ task-master update --file=custom-tasks.json --from=5 --prompt="Change database f
```
Notes:
- The `--prompt` parameter is required and should explain the changes or new context
- Only tasks that aren't marked as 'done' will be updated
- Tasks with ID >= the specified --from value will be updated
@@ -134,6 +138,7 @@ task-master set-status --id=1,2,3 --status=done
```
Notes:
- When marking a parent task as "done", all of its subtasks will automatically be marked as "done" as well
- Common status values are 'done', 'pending', and 'deferred', but any string is accepted
- You can specify multiple task IDs by separating them with commas
@@ -183,6 +188,7 @@ task-master clear-subtasks --all
```
Notes:
- After clearing subtasks, task files are automatically regenerated
- This is useful when you want to regenerate subtasks with a different approach
- Can be combined with the `expand` command to immediately generate new subtasks
@@ -198,6 +204,7 @@ The script integrates with two AI services:
The Perplexity integration uses the OpenAI client to connect to Perplexity's API, which provides enhanced research capabilities for generating more informed subtasks. If the Perplexity API is unavailable or encounters an error, the script will automatically fall back to using Anthropic's Claude.
To use the Perplexity integration:
1. Obtain a Perplexity API key
2. Add `PERPLEXITY_API_KEY` to your `.env` file
3. Optionally specify `PERPLEXITY_MODEL` in your `.env` file (default: "sonar-medium-online")
@@ -206,6 +213,7 @@ To use the Perplexity integration:
## Logging
The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:
- `debug`: Detailed information, typically useful for troubleshooting
- `info`: Confirmation that things are working as expected (default)
- `warn`: Warning messages that don't prevent execution
@@ -228,17 +236,20 @@ task-master remove-dependency --id=<id> --depends-on=<id>
These commands:
1. **Allow precise dependency management**:
- Add dependencies between tasks with automatic validation
- Remove dependencies when they're no longer needed
- Update task files automatically after changes
2. **Include validation checks**:
- Prevent circular dependencies (a task depending on itself)
- Prevent duplicate dependencies
- Verify that both tasks exist before adding/removing dependencies
- Check if dependencies exist before attempting to remove them
3. **Provide clear feedback**:
- Success messages confirm when dependencies are added/removed
- Error messages explain why operations failed (if applicable)
@@ -263,6 +274,7 @@ task-master validate-dependencies --file=custom-tasks.json
```
This command:
- Scans all tasks and subtasks for non-existent dependencies
- Identifies potential self-dependencies (tasks referencing themselves)
- Reports all found issues without modifying files
@@ -284,6 +296,7 @@ task-master fix-dependencies --file=custom-tasks.json
```
This command:
1. **Validates all dependencies** across tasks and subtasks
2. **Automatically removes**:
- References to non-existent tasks and subtasks
@@ -321,6 +334,7 @@ task-master analyze-complexity --research
```
Notes:
- The command uses Claude to analyze each task's complexity (or Perplexity with --research flag)
- Tasks are scored on a scale of 1-10
- Each task receives a recommended number of subtasks based on DEFAULT_SUBTASKS configuration
@@ -345,33 +359,35 @@ task-master expand --id=8 --num=5 --prompt="Custom prompt"
```
When a complexity report exists:
- The `expand` command will use the recommended subtask count from the report (unless overridden)
- It will use the tailored expansion prompt from the report (unless a custom prompt is provided)
- When using `--all`, tasks are sorted by complexity score (highest first)
- The `--research` flag is preserved from the complexity analysis to expansion
The output report structure is:
```json
{
"meta": {
"generatedAt": "2023-06-15T12:34:56.789Z",
"tasksAnalyzed": 20,
"thresholdScore": 5,
"projectName": "Your Project Name",
"usedResearch": true
},
"complexityAnalysis": [
{
"taskId": 8,
"taskTitle": "Develop Implementation Drift Handling",
"complexityScore": 9.5,
"recommendedSubtasks": 6,
"expansionPrompt": "Create subtasks that handle detecting...",
"reasoning": "This task requires sophisticated logic...",
"expansionCommand": "task-master expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
},
// More tasks sorted by complexity score (highest first)
]
"meta": {
"generatedAt": "2023-06-15T12:34:56.789Z",
"tasksAnalyzed": 20,
"thresholdScore": 5,
"projectName": "Your Project Name",
"usedResearch": true
},
"complexityAnalysis": [
{
"taskId": 8,
"taskTitle": "Develop Implementation Drift Handling",
"complexityScore": 9.5,
"recommendedSubtasks": 6,
"expansionPrompt": "Create subtasks that handle detecting...",
"reasoning": "This task requires sophisticated logic...",
"expansionCommand": "task-master expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
}
// More tasks sorted by complexity score (highest first)
]
}
```
@@ -438,4 +454,4 @@ This command:
- Commands for working with subtasks
- For subtasks, provides a link to view the parent task
This command is particularly useful when you need to examine a specific task in detail before implementing it or when you want to check the status and details of a particular task.
This command is particularly useful when you need to examine a specific task in detail before implementing it or when you want to check the status and details of a particular task.
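
For reference, a minimal sketch of the tasks.json shape the README above describes (id, title, description, status, dependencies, subtasks, plus a meta block), written as a JavaScript object literal. The top-level layout and the sample values are assumptions for illustration, not taken from the repository.

```javascript
// Minimal sketch of the tasks.json structure described in the README above.
// The exact top-level layout and sample values are assumptions.
const tasksFile = {
	meta: {
		projectName: 'Your Project Name',
		version: '1.0.0'
	},
	tasks: [
		{
			id: 1,
			title: 'Set up project skeleton',
			description: 'Initial scaffolding',
			status: 'done',
			dependencies: [],
			subtasks: []
		},
		{
			id: 2,
			title: 'Parse the PRD into tasks',
			description: 'Generate tasks from the PRD',
			status: 'pending',
			dependencies: [1],
			subtasks: []
		}
	]
};
```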

View File

@@ -20,11 +20,11 @@ const args = process.argv.slice(2);
// Spawn the init script with all arguments
const child = spawn('node', [initScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
stdio: 'inherit',
cwd: process.cwd()
});
// Handle exit
child.on('close', (code) => {
process.exit(code);
});
process.exit(code);
});

View File

@@ -44,30 +44,36 @@ const initScriptPath = resolve(__dirname, '../scripts/init.js');
// Helper function to run dev.js with arguments
function runDevScript(args) {
// Debug: Show the transformed arguments when DEBUG=1 is set
if (process.env.DEBUG === '1') {
console.error('\nDEBUG - CLI Wrapper Analysis:');
console.error('- Original command: ' + process.argv.join(' '));
console.error('- Transformed args: ' + args.join(' '));
console.error('- dev.js will receive: node ' + devScriptPath + ' ' + args.join(' ') + '\n');
}
// For testing: If TEST_MODE is set, just print args and exit
if (process.env.TEST_MODE === '1') {
console.log('Would execute:');
console.log(`node ${devScriptPath} ${args.join(' ')}`);
process.exit(0);
return;
}
const child = spawn('node', [devScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
// Debug: Show the transformed arguments when DEBUG=1 is set
if (process.env.DEBUG === '1') {
console.error('\nDEBUG - CLI Wrapper Analysis:');
console.error('- Original command: ' + process.argv.join(' '));
console.error('- Transformed args: ' + args.join(' '));
console.error(
'- dev.js will receive: node ' +
devScriptPath +
' ' +
args.join(' ') +
'\n'
);
}
// For testing: If TEST_MODE is set, just print args and exit
if (process.env.TEST_MODE === '1') {
console.log('Would execute:');
console.log(`node ${devScriptPath} ${args.join(' ')}`);
process.exit(0);
return;
}
const child = spawn('node', [devScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
}
// Helper function to detect camelCase and convert to kebab-case
@@ -79,228 +85,239 @@ const toKebabCase = (str) => str.replace(/([A-Z])/g, '-$1').toLowerCase();
* @returns {Function} Wrapper action function
*/
function createDevScriptAction(commandName) {
return (options, cmd) => {
// Check for camelCase flags and error out with helpful message
const camelCaseFlags = detectCamelCaseFlags(process.argv);
// If camelCase flags were found, show error and exit
if (camelCaseFlags.length > 0) {
console.error('\nError: Please use kebab-case for CLI flags:');
camelCaseFlags.forEach(flag => {
console.error(` Instead of: --${flag.original}`);
console.error(` Use: --${flag.kebabCase}`);
});
console.error('\nExample: task-master parse-prd --num-tasks=5 instead of --numTasks=5\n');
process.exit(1);
}
// Since we've ensured no camelCase flags, we can now just:
// 1. Start with the command name
const args = [commandName];
// 3. Get positional arguments and explicit flags from the command line
const commandArgs = [];
const positionals = new Set(); // Track positional args we've seen
// Find the command in raw process.argv to extract args
const commandIndex = process.argv.indexOf(commandName);
if (commandIndex !== -1) {
// Process all args after the command name
for (let i = commandIndex + 1; i < process.argv.length; i++) {
const arg = process.argv[i];
if (arg.startsWith('--')) {
// It's a flag - pass through as is
commandArgs.push(arg);
// Skip the next arg if this is a flag with a value (not --flag=value format)
if (!arg.includes('=') &&
i + 1 < process.argv.length &&
!process.argv[i+1].startsWith('--')) {
commandArgs.push(process.argv[++i]);
}
} else if (!positionals.has(arg)) {
// It's a positional argument we haven't seen
commandArgs.push(arg);
positionals.add(arg);
}
}
}
// Add all command line args we collected
args.push(...commandArgs);
// 4. Add default options from Commander if not specified on command line
// Track which options we've seen on the command line
const userOptions = new Set();
for (const arg of commandArgs) {
if (arg.startsWith('--')) {
// Extract option name (without -- and value)
const name = arg.split('=')[0].slice(2);
userOptions.add(name);
// Add the kebab-case version too, to prevent duplicates
const kebabName = name.replace(/([A-Z])/g, '-$1').toLowerCase();
userOptions.add(kebabName);
// Add the camelCase version as well
const camelName = kebabName.replace(/-([a-z])/g, (_, letter) => letter.toUpperCase());
userOptions.add(camelName);
}
}
// Add Commander-provided defaults for options not specified by user
Object.entries(options).forEach(([key, value]) => {
// Debug output to see what keys we're getting
if (process.env.DEBUG === '1') {
console.error(`DEBUG - Processing option: ${key} = ${value}`);
}
return (options, cmd) => {
// Check for camelCase flags and error out with helpful message
const camelCaseFlags = detectCamelCaseFlags(process.argv);
// Special case for numTasks > num-tasks (a known problem case)
if (key === 'numTasks') {
if (process.env.DEBUG === '1') {
console.error('DEBUG - Converting numTasks to num-tasks');
}
if (!userOptions.has('num-tasks') && !userOptions.has('numTasks')) {
args.push(`--num-tasks=${value}`);
}
return;
}
// Skip built-in Commander properties and options the user provided
if (['parent', 'commands', 'options', 'rawArgs'].includes(key) || userOptions.has(key)) {
return;
}
// Also check the kebab-case version of this key
const kebabKey = key.replace(/([A-Z])/g, '-$1').toLowerCase();
if (userOptions.has(kebabKey)) {
return;
}
// Add default values, using kebab-case for the parameter name
if (value !== undefined) {
if (typeof value === 'boolean') {
if (value === true) {
args.push(`--${kebabKey}`);
} else if (value === false && key === 'generate') {
args.push('--skip-generate');
}
} else {
// Always use kebab-case for option names
args.push(`--${kebabKey}=${value}`);
}
}
});
// Special handling for parent parameter (uses -p)
if (options.parent && !args.includes('-p') && !userOptions.has('parent')) {
args.push('-p', options.parent);
}
// Debug output for troubleshooting
if (process.env.DEBUG === '1') {
console.error('DEBUG - Command args:', commandArgs);
console.error('DEBUG - User options:', Array.from(userOptions));
console.error('DEBUG - Commander options:', options);
console.error('DEBUG - Final args:', args);
}
// Run the script with our processed args
runDevScript(args);
};
// If camelCase flags were found, show error and exit
if (camelCaseFlags.length > 0) {
console.error('\nError: Please use kebab-case for CLI flags:');
camelCaseFlags.forEach((flag) => {
console.error(` Instead of: --${flag.original}`);
console.error(` Use: --${flag.kebabCase}`);
});
console.error(
'\nExample: task-master parse-prd --num-tasks=5 instead of --numTasks=5\n'
);
process.exit(1);
}
// Since we've ensured no camelCase flags, we can now just:
// 1. Start with the command name
const args = [commandName];
// 3. Get positional arguments and explicit flags from the command line
const commandArgs = [];
const positionals = new Set(); // Track positional args we've seen
// Find the command in raw process.argv to extract args
const commandIndex = process.argv.indexOf(commandName);
if (commandIndex !== -1) {
// Process all args after the command name
for (let i = commandIndex + 1; i < process.argv.length; i++) {
const arg = process.argv[i];
if (arg.startsWith('--')) {
// It's a flag - pass through as is
commandArgs.push(arg);
// Skip the next arg if this is a flag with a value (not --flag=value format)
if (
!arg.includes('=') &&
i + 1 < process.argv.length &&
!process.argv[i + 1].startsWith('--')
) {
commandArgs.push(process.argv[++i]);
}
} else if (!positionals.has(arg)) {
// It's a positional argument we haven't seen
commandArgs.push(arg);
positionals.add(arg);
}
}
}
// Add all command line args we collected
args.push(...commandArgs);
// 4. Add default options from Commander if not specified on command line
// Track which options we've seen on the command line
const userOptions = new Set();
for (const arg of commandArgs) {
if (arg.startsWith('--')) {
// Extract option name (without -- and value)
const name = arg.split('=')[0].slice(2);
userOptions.add(name);
// Add the kebab-case version too, to prevent duplicates
const kebabName = name.replace(/([A-Z])/g, '-$1').toLowerCase();
userOptions.add(kebabName);
// Add the camelCase version as well
const camelName = kebabName.replace(/-([a-z])/g, (_, letter) =>
letter.toUpperCase()
);
userOptions.add(camelName);
}
}
// Add Commander-provided defaults for options not specified by user
Object.entries(options).forEach(([key, value]) => {
// Debug output to see what keys we're getting
if (process.env.DEBUG === '1') {
console.error(`DEBUG - Processing option: ${key} = ${value}`);
}
// Special case for numTasks > num-tasks (a known problem case)
if (key === 'numTasks') {
if (process.env.DEBUG === '1') {
console.error('DEBUG - Converting numTasks to num-tasks');
}
if (!userOptions.has('num-tasks') && !userOptions.has('numTasks')) {
args.push(`--num-tasks=${value}`);
}
return;
}
// Skip built-in Commander properties and options the user provided
if (
['parent', 'commands', 'options', 'rawArgs'].includes(key) ||
userOptions.has(key)
) {
return;
}
// Also check the kebab-case version of this key
const kebabKey = key.replace(/([A-Z])/g, '-$1').toLowerCase();
if (userOptions.has(kebabKey)) {
return;
}
// Add default values, using kebab-case for the parameter name
if (value !== undefined) {
if (typeof value === 'boolean') {
if (value === true) {
args.push(`--${kebabKey}`);
} else if (value === false && key === 'generate') {
args.push('--skip-generate');
}
} else {
// Always use kebab-case for option names
args.push(`--${kebabKey}=${value}`);
}
}
});
// Special handling for parent parameter (uses -p)
if (options.parent && !args.includes('-p') && !userOptions.has('parent')) {
args.push('-p', options.parent);
}
// Debug output for troubleshooting
if (process.env.DEBUG === '1') {
console.error('DEBUG - Command args:', commandArgs);
console.error('DEBUG - User options:', Array.from(userOptions));
console.error('DEBUG - Commander options:', options);
console.error('DEBUG - Final args:', args);
}
// Run the script with our processed args
runDevScript(args);
};
}
// Special case for the 'init' command which uses a different script
function registerInitCommand(program) {
program
.command('init')
.description('Initialize a new project')
.option('-y, --yes', 'Skip prompts and use default values')
.option('-n, --name <name>', 'Project name')
.option('-d, --description <description>', 'Project description')
.option('-v, --version <version>', 'Project version')
.option('-a, --author <author>', 'Author name')
.option('--skip-install', 'Skip installing dependencies')
.option('--dry-run', 'Show what would be done without making changes')
.action((options) => {
// Pass through any options to the init script
const args = ['--yes', 'name', 'description', 'version', 'author', 'skip-install', 'dry-run']
.filter(opt => options[opt])
.map(opt => {
if (opt === 'yes' || opt === 'skip-install' || opt === 'dry-run') {
return `--${opt}`;
}
return `--${opt}=${options[opt]}`;
});
const child = spawn('node', [initScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program
.command('init')
.description('Initialize a new project')
.option('-y, --yes', 'Skip prompts and use default values')
.option('-n, --name <name>', 'Project name')
.option('-d, --description <description>', 'Project description')
.option('-v, --version <version>', 'Project version')
.option('-a, --author <author>', 'Author name')
.option('--skip-install', 'Skip installing dependencies')
.option('--dry-run', 'Show what would be done without making changes')
.action((options) => {
// Pass through any options to the init script
const args = [
'--yes',
'name',
'description',
'version',
'author',
'skip-install',
'dry-run'
]
.filter((opt) => options[opt])
.map((opt) => {
if (opt === 'yes' || opt === 'skip-install' || opt === 'dry-run') {
return `--${opt}`;
}
return `--${opt}=${options[opt]}`;
});
const child = spawn('node', [initScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
}
// Set up the command-line interface
const program = new Command();
program
.name('task-master')
.description('Claude Task Master CLI')
.version(version)
.addHelpText('afterAll', () => {
// Use the same help display function as dev.js for consistency
displayHelp();
return ''; // Return empty string to prevent commander's default help
});
.name('task-master')
.description('Claude Task Master CLI')
.version(version)
.addHelpText('afterAll', () => {
// Use the same help display function as dev.js for consistency
displayHelp();
return ''; // Return empty string to prevent commander's default help
});
// Add custom help option to directly call our help display
program.helpOption('-h, --help', 'Display help information');
program.on('--help', () => {
displayHelp();
displayHelp();
});
// Add special case commands
registerInitCommand(program);
program
.command('dev')
.description('Run the dev.js script')
.action(() => {
const args = process.argv.slice(process.argv.indexOf('dev') + 1);
runDevScript(args);
});
.command('dev')
.description('Run the dev.js script')
.action(() => {
const args = process.argv.slice(process.argv.indexOf('dev') + 1);
runDevScript(args);
});
// Use a temporary Command instance to get all command definitions
const tempProgram = new Command();
registerCommands(tempProgram);
// For each command in the temp instance, add a modified version to our actual program
tempProgram.commands.forEach(cmd => {
if (['init', 'dev'].includes(cmd.name())) {
// Skip commands we've already defined specially
return;
}
// Create a new command with the same name and description
const newCmd = program
.command(cmd.name())
.description(cmd.description());
// Copy all options
cmd.options.forEach(opt => {
newCmd.option(
opt.flags,
opt.description,
opt.defaultValue
);
});
// Set the action to proxy to dev.js
newCmd.action(createDevScriptAction(cmd.name()));
tempProgram.commands.forEach((cmd) => {
if (['init', 'dev'].includes(cmd.name())) {
// Skip commands we've already defined specially
return;
}
// Create a new command with the same name and description
const newCmd = program.command(cmd.name()).description(cmd.description());
// Copy all options
cmd.options.forEach((opt) => {
newCmd.option(opt.flags, opt.description, opt.defaultValue);
});
// Set the action to proxy to dev.js
newCmd.action(createDevScriptAction(cmd.name()));
});
// Parse the command line arguments
@@ -308,47 +325,56 @@ program.parse(process.argv);
// Add global error handling for unknown commands and options
process.on('uncaughtException', (err) => {
// Check if this is a commander.js unknown option error
if (err.code === 'commander.unknownOption') {
const option = err.message.match(/'([^']+)'/)?.[1];
const commandArg = process.argv.find(arg => !arg.startsWith('-') &&
arg !== 'task-master' &&
!arg.includes('/') &&
arg !== 'node');
const command = commandArg || 'unknown';
console.error(chalk.red(`Error: Unknown option '${option}'`));
console.error(chalk.yellow(`Run 'task-master ${command} --help' to see available options for this command`));
process.exit(1);
}
// Check if this is a commander.js unknown command error
if (err.code === 'commander.unknownCommand') {
const command = err.message.match(/'([^']+)'/)?.[1];
console.error(chalk.red(`Error: Unknown command '${command}'`));
console.error(chalk.yellow(`Run 'task-master --help' to see available commands`));
process.exit(1);
}
// Handle other uncaught exceptions
console.error(chalk.red(`Error: ${err.message}`));
if (process.env.DEBUG === '1') {
console.error(err);
}
process.exit(1);
// Check if this is a commander.js unknown option error
if (err.code === 'commander.unknownOption') {
const option = err.message.match(/'([^']+)'/)?.[1];
const commandArg = process.argv.find(
(arg) =>
!arg.startsWith('-') &&
arg !== 'task-master' &&
!arg.includes('/') &&
arg !== 'node'
);
const command = commandArg || 'unknown';
console.error(chalk.red(`Error: Unknown option '${option}'`));
console.error(
chalk.yellow(
`Run 'task-master ${command} --help' to see available options for this command`
)
);
process.exit(1);
}
// Check if this is a commander.js unknown command error
if (err.code === 'commander.unknownCommand') {
const command = err.message.match(/'([^']+)'/)?.[1];
console.error(chalk.red(`Error: Unknown command '${command}'`));
console.error(
chalk.yellow(`Run 'task-master --help' to see available commands`)
);
process.exit(1);
}
// Handle other uncaught exceptions
console.error(chalk.red(`Error: ${err.message}`));
if (process.env.DEBUG === '1') {
console.error(err);
}
process.exit(1);
});
// Show help if no command was provided (just 'task-master' with no args)
if (process.argv.length <= 2) {
displayBanner();
displayHelp();
process.exit(0);
displayBanner();
displayHelp();
process.exit(0);
}
// Add exports at the end of the file
if (typeof module !== 'undefined') {
module.exports = {
detectCamelCaseFlags
};
}
module.exports = {
detectCamelCaseFlags
};
}

View File

@@ -41,39 +41,39 @@ Core functions should follow this pattern to support both CLI and MCP use:
* @returns {Object|undefined} - Returns data when source is 'mcp'
*/
function exampleFunction(param1, param2, options = {}) {
try {
// Skip UI for MCP
if (options.source !== 'mcp') {
displayBanner();
console.log(chalk.blue('Processing operation...'));
}
// Do the core business logic
const result = doSomething(param1, param2);
// For MCP, return structured data
if (options.source === 'mcp') {
return {
success: true,
data: result
};
}
// For CLI, display output
console.log(chalk.green('Operation completed successfully!'));
} catch (error) {
// Handle errors based on source
if (options.source === 'mcp') {
return {
success: false,
error: error.message
};
}
// CLI error handling
console.error(chalk.red(`Error: ${error.message}`));
process.exit(1);
}
try {
// Skip UI for MCP
if (options.source !== 'mcp') {
displayBanner();
console.log(chalk.blue('Processing operation...'));
}
// Do the core business logic
const result = doSomething(param1, param2);
// For MCP, return structured data
if (options.source === 'mcp') {
return {
success: true,
data: result
};
}
// For CLI, display output
console.log(chalk.green('Operation completed successfully!'));
} catch (error) {
// Handle errors based on source
if (options.source === 'mcp') {
return {
success: false,
error: error.message
};
}
// CLI error handling
console.error(chalk.red(`Error: ${error.message}`));
process.exit(1);
}
}
```
@@ -89,17 +89,17 @@ export const simpleFunction = adaptForMcp(originalFunction);
// Split implementation - completely different code paths for CLI vs MCP
export const complexFunction = sourceSplitFunction(
// CLI version with UI
function(param1, param2) {
displayBanner();
console.log(`Processing ${param1}...`);
// ... CLI implementation
},
// MCP version with structured return
function(param1, param2, options = {}) {
// ... MCP implementation
return { success: true, data };
}
// CLI version with UI
function (param1, param2) {
displayBanner();
console.log(`Processing ${param1}...`);
// ... CLI implementation
},
// MCP version with structured return
function (param1, param2, options = {}) {
// ... MCP implementation
return { success: true, data };
}
);
```
@@ -110,7 +110,7 @@ When adding new features, follow these steps to ensure CLI and MCP compatibility
1. **Implement Core Logic** in the appropriate module file
2. **Add Source Parameter Support** using the pattern above
3. **Add to task-master-core.js** to make it available for direct import
4. **Update Command Map** in `mcp-server/src/tools/utils.js`
4. **Update Command Map** in `mcp-server/src/tools/utils.js`
5. **Create Tool Implementation** in `mcp-server/src/tools/`
6. **Register the Tool** in `mcp-server/src/tools/index.js`
@@ -119,39 +119,39 @@ When adding new features, follow these steps to ensure CLI and MCP compatibility
```javascript
// In scripts/modules/task-manager.js
export async function newFeature(param1, param2, options = {}) {
try {
// Source-specific UI
if (options.source !== 'mcp') {
displayBanner();
console.log(chalk.blue('Running new feature...'));
}
// Shared core logic
const result = processFeature(param1, param2);
// Source-specific return handling
if (options.source === 'mcp') {
return {
success: true,
data: result
};
}
// CLI output
console.log(chalk.green('Feature completed successfully!'));
displayOutput(result);
} catch (error) {
// Error handling based on source
if (options.source === 'mcp') {
return {
success: false,
error: error.message
};
}
console.error(chalk.red(`Error: ${error.message}`));
process.exit(1);
}
try {
// Source-specific UI
if (options.source !== 'mcp') {
displayBanner();
console.log(chalk.blue('Running new feature...'));
}
// Shared core logic
const result = processFeature(param1, param2);
// Source-specific return handling
if (options.source === 'mcp') {
return {
success: true,
data: result
};
}
// CLI output
console.log(chalk.green('Feature completed successfully!'));
displayOutput(result);
} catch (error) {
// Error handling based on source
if (options.source === 'mcp') {
return {
success: false,
error: error.message
};
}
console.error(chalk.red(`Error: ${error.message}`));
process.exit(1);
}
}
```
@@ -163,12 +163,12 @@ import { newFeature } from '../../../scripts/modules/task-manager.js';
// Add to exports
export default {
// ... existing functions
async newFeature(args = {}, options = {}) {
const { param1, param2 } = args;
return executeFunction(newFeature, [param1, param2], options);
}
// ... existing functions
async newFeature(args = {}, options = {}) {
const { param1, param2 } = args;
return executeFunction(newFeature, [param1, param2], options);
}
};
```
@@ -177,8 +177,8 @@ export default {
```javascript
// In mcp-server/src/tools/utils.js
const commandMap = {
// ... existing mappings
'new-feature': 'newFeature'
// ... existing mappings
'new-feature': 'newFeature'
};
```
@@ -186,53 +186,53 @@ const commandMap = {
```javascript
// In mcp-server/src/tools/newFeature.js
import { z } from "zod";
import { z } from 'zod';
import {
executeTaskMasterCommand,
createContentResponse,
createErrorResponse,
} from "./utils.js";
executeTaskMasterCommand,
createContentResponse,
createErrorResponse
} from './utils.js';
export function registerNewFeatureTool(server) {
server.addTool({
name: "newFeature",
description: "Run the new feature",
parameters: z.object({
param1: z.string().describe("First parameter"),
param2: z.number().optional().describe("Second parameter"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z.string().describe("Root directory of the project")
}),
execute: async (args, { log }) => {
try {
log.info(`Running new feature with args: ${JSON.stringify(args)}`);
server.addTool({
name: 'newFeature',
description: 'Run the new feature',
parameters: z.object({
param1: z.string().describe('First parameter'),
param2: z.number().optional().describe('Second parameter'),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z.string().describe('Root directory of the project')
}),
execute: async (args, { log }) => {
try {
log.info(`Running new feature with args: ${JSON.stringify(args)}`);
const cmdArgs = [];
if (args.param1) cmdArgs.push(`--param1=${args.param1}`);
if (args.param2) cmdArgs.push(`--param2=${args.param2}`);
if (args.file) cmdArgs.push(`--file=${args.file}`);
const cmdArgs = [];
if (args.param1) cmdArgs.push(`--param1=${args.param1}`);
if (args.param2) cmdArgs.push(`--param2=${args.param2}`);
if (args.file) cmdArgs.push(`--file=${args.file}`);
const projectRoot = args.projectRoot;
const projectRoot = args.projectRoot;
// Execute the command
const result = await executeTaskMasterCommand(
"new-feature",
log,
cmdArgs,
projectRoot
);
// Execute the command
const result = await executeTaskMasterCommand(
'new-feature',
log,
cmdArgs,
projectRoot
);
if (!result.success) {
throw new Error(result.error);
}
if (!result.success) {
throw new Error(result.error);
}
return createContentResponse(result.stdout);
} catch (error) {
log.error(`Error in new feature: ${error.message}`);
return createErrorResponse(`Error in new feature: ${error.message}`);
}
},
});
return createContentResponse(result.stdout);
} catch (error) {
log.error(`Error in new feature: ${error.message}`);
return createErrorResponse(`Error in new feature: ${error.message}`);
}
}
});
}
```
@@ -240,11 +240,11 @@ export function registerNewFeatureTool(server) {
```javascript
// In mcp-server/src/tools/index.js
import { registerNewFeatureTool } from "./newFeature.js";
import { registerNewFeatureTool } from './newFeature.js';
export function registerTaskMasterTools(server) {
// ... existing registrations
registerNewFeatureTool(server);
// ... existing registrations
registerNewFeatureTool(server);
}
```
@@ -266,4 +266,4 @@ node mcp-server/tests/test-command.js newFeature
2. **Structured Data for MCP** - Return clean JSON objects from MCP source functions
3. **Consistent Error Handling** - Standardize error formats for both interfaces
4. **Documentation** - Update MCP tool documentation when adding new features
5. **Testing** - Test both CLI and MCP interfaces for any new or modified feature
5. **Testing** - Test both CLI and MCP interfaces for any new or modified feature

File diff suppressed because it is too large.

index.js (190 changed lines)
View File

@@ -41,27 +41,27 @@ export const devScriptPath = resolve(__dirname, './scripts/dev.js');
// Export a function to initialize a new project programmatically
export const initProject = async (options = {}) => {
const init = await import('./scripts/init.js');
return init.initializeProject(options);
const init = await import('./scripts/init.js');
return init.initializeProject(options);
};
// Export a function to run init as a CLI command
export const runInitCLI = async () => {
// Using spawn to ensure proper handling of stdio and process exit
const child = spawn('node', [resolve(__dirname, './scripts/init.js')], {
stdio: 'inherit',
cwd: process.cwd()
});
return new Promise((resolve, reject) => {
child.on('close', (code) => {
if (code === 0) {
resolve();
} else {
reject(new Error(`Init script exited with code ${code}`));
}
});
});
// Using spawn to ensure proper handling of stdio and process exit
const child = spawn('node', [resolve(__dirname, './scripts/init.js')], {
stdio: 'inherit',
cwd: process.cwd()
});
return new Promise((resolve, reject) => {
child.on('close', (code) => {
if (code === 0) {
resolve();
} else {
reject(new Error(`Init script exited with code ${code}`));
}
});
});
};
// Export version information
@@ -69,81 +69,81 @@ export const version = packageJson.version;
// CLI implementation
if (import.meta.url === `file://${process.argv[1]}`) {
const program = new Command();
program
.name('task-master')
.description('Claude Task Master CLI')
.version(version);
program
.command('init')
.description('Initialize a new project')
.action(() => {
runInitCLI().catch(err => {
console.error('Init failed:', err.message);
process.exit(1);
});
});
program
.command('dev')
.description('Run the dev.js script')
.allowUnknownOption(true)
.action(() => {
const args = process.argv.slice(process.argv.indexOf('dev') + 1);
const child = spawn('node', [devScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
// Add shortcuts for common dev.js commands
program
.command('list')
.description('List all tasks')
.action(() => {
const child = spawn('node', [devScriptPath, 'list'], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program
.command('next')
.description('Show the next task to work on')
.action(() => {
const child = spawn('node', [devScriptPath, 'next'], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program
.command('generate')
.description('Generate task files')
.action(() => {
const child = spawn('node', [devScriptPath, 'generate'], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program.parse(process.argv);
}
const program = new Command();
program
.name('task-master')
.description('Claude Task Master CLI')
.version(version);
program
.command('init')
.description('Initialize a new project')
.action(() => {
runInitCLI().catch((err) => {
console.error('Init failed:', err.message);
process.exit(1);
});
});
program
.command('dev')
.description('Run the dev.js script')
.allowUnknownOption(true)
.action(() => {
const args = process.argv.slice(process.argv.indexOf('dev') + 1);
const child = spawn('node', [devScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
// Add shortcuts for common dev.js commands
program
.command('list')
.description('List all tasks')
.action(() => {
const child = spawn('node', [devScriptPath, 'list'], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program
.command('next')
.description('Show the next task to work on')
.action(() => {
const child = spawn('node', [devScriptPath, 'next'], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program
.command('generate')
.description('Generate task files')
.action(() => {
const child = spawn('node', [devScriptPath, 'generate'], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program.parse(process.argv);
}

View File

@@ -1,56 +1,56 @@
export default {
// Use Node.js environment for testing
testEnvironment: 'node',
// Automatically clear mock calls between every test
clearMocks: true,
// Indicates whether the coverage information should be collected while executing the test
collectCoverage: false,
// The directory where Jest should output its coverage files
coverageDirectory: 'coverage',
// A list of paths to directories that Jest should use to search for files in
roots: ['<rootDir>/tests'],
// The glob patterns Jest uses to detect test files
testMatch: [
'**/__tests__/**/*.js',
'**/?(*.)+(spec|test).js',
'**/tests/*.test.js'
],
// Transform files
transform: {},
// Disable transformations for node_modules
transformIgnorePatterns: ['/node_modules/'],
// Set moduleNameMapper for absolute paths
moduleNameMapper: {
'^@/(.*)$': '<rootDir>/$1'
},
// Setup module aliases
moduleDirectories: ['node_modules', '<rootDir>'],
// Configure test coverage thresholds
coverageThreshold: {
global: {
branches: 80,
functions: 80,
lines: 80,
statements: 80
}
},
// Generate coverage report in these formats
coverageReporters: ['text', 'lcov'],
// Verbose output
verbose: true,
// Setup file
setupFilesAfterEnv: ['<rootDir>/tests/setup.js']
};
// Use Node.js environment for testing
testEnvironment: 'node',
// Automatically clear mock calls between every test
clearMocks: true,
// Indicates whether the coverage information should be collected while executing the test
collectCoverage: false,
// The directory where Jest should output its coverage files
coverageDirectory: 'coverage',
// A list of paths to directories that Jest should use to search for files in
roots: ['<rootDir>/tests'],
// The glob patterns Jest uses to detect test files
testMatch: [
'**/__tests__/**/*.js',
'**/?(*.)+(spec|test).js',
'**/tests/*.test.js'
],
// Transform files
transform: {},
// Disable transformations for node_modules
transformIgnorePatterns: ['/node_modules/'],
// Set moduleNameMapper for absolute paths
moduleNameMapper: {
'^@/(.*)$': '<rootDir>/$1'
},
// Setup module aliases
moduleDirectories: ['node_modules', '<rootDir>'],
// Configure test coverage thresholds
coverageThreshold: {
global: {
branches: 80,
functions: 80,
lines: 80,
statements: 80
}
},
// Generate coverage report in these formats
coverageReporters: ['text', 'lcov'],
// Verbose output
verbose: true,
// Setup file
setupFilesAfterEnv: ['<rootDir>/tests/setup.js']
};

View File

@@ -1,8 +1,8 @@
#!/usr/bin/env node
import TaskMasterMCPServer from "./src/index.js";
import dotenv from "dotenv";
import logger from "./src/logger.js";
import TaskMasterMCPServer from './src/index.js';
import dotenv from 'dotenv';
import logger from './src/logger.js';
// Load environment variables
dotenv.config();
@@ -11,25 +11,25 @@ dotenv.config();
* Start the MCP server
*/
async function startServer() {
const server = new TaskMasterMCPServer();
const server = new TaskMasterMCPServer();
// Handle graceful shutdown
process.on("SIGINT", async () => {
await server.stop();
process.exit(0);
});
// Handle graceful shutdown
process.on('SIGINT', async () => {
await server.stop();
process.exit(0);
});
process.on("SIGTERM", async () => {
await server.stop();
process.exit(0);
});
process.on('SIGTERM', async () => {
await server.stop();
process.exit(0);
});
try {
await server.start();
} catch (error) {
logger.error(`Failed to start MCP server: ${error.message}`);
process.exit(1);
}
try {
await server.start();
} catch (error) {
logger.error(`Failed to start MCP server: ${error.message}`);
process.exit(1);
}
}
// Start the server

View File

@@ -2,84 +2,90 @@ import { jest } from '@jest/globals';
import { ContextManager } from '../context-manager.js';
describe('ContextManager', () => {
let contextManager;
let contextManager;
beforeEach(() => {
contextManager = new ContextManager({
maxCacheSize: 10,
ttl: 1000, // 1 second for testing
maxContextSize: 1000
});
});
beforeEach(() => {
contextManager = new ContextManager({
maxCacheSize: 10,
ttl: 1000, // 1 second for testing
maxContextSize: 1000
});
});
describe('getContext', () => {
it('should create a new context when not in cache', async () => {
const context = await contextManager.getContext('test-id', { test: true });
expect(context.id).toBe('test-id');
expect(context.metadata.test).toBe(true);
expect(contextManager.stats.misses).toBe(1);
expect(contextManager.stats.hits).toBe(0);
});
describe('getContext', () => {
it('should create a new context when not in cache', async () => {
const context = await contextManager.getContext('test-id', {
test: true
});
expect(context.id).toBe('test-id');
expect(context.metadata.test).toBe(true);
expect(contextManager.stats.misses).toBe(1);
expect(contextManager.stats.hits).toBe(0);
});
it('should return cached context when available', async () => {
// First call creates the context
await contextManager.getContext('test-id', { test: true });
// Second call should hit cache
const context = await contextManager.getContext('test-id', { test: true });
expect(context.id).toBe('test-id');
expect(context.metadata.test).toBe(true);
expect(contextManager.stats.hits).toBe(1);
expect(contextManager.stats.misses).toBe(1);
});
it('should return cached context when available', async () => {
// First call creates the context
await contextManager.getContext('test-id', { test: true });
it('should respect TTL settings', async () => {
// Create context
await contextManager.getContext('test-id', { test: true });
// Wait for TTL to expire
await new Promise(resolve => setTimeout(resolve, 1100));
// Should create new context
await contextManager.getContext('test-id', { test: true });
expect(contextManager.stats.misses).toBe(2);
expect(contextManager.stats.hits).toBe(0);
});
});
// Second call should hit cache
const context = await contextManager.getContext('test-id', {
test: true
});
expect(context.id).toBe('test-id');
expect(context.metadata.test).toBe(true);
expect(contextManager.stats.hits).toBe(1);
expect(contextManager.stats.misses).toBe(1);
});
describe('updateContext', () => {
it('should update existing context metadata', async () => {
await contextManager.getContext('test-id', { initial: true });
const updated = await contextManager.updateContext('test-id', { updated: true });
expect(updated.metadata.initial).toBe(true);
expect(updated.metadata.updated).toBe(true);
});
});
it('should respect TTL settings', async () => {
// Create context
await contextManager.getContext('test-id', { test: true });
describe('invalidateContext', () => {
it('should remove context from cache', async () => {
await contextManager.getContext('test-id', { test: true });
contextManager.invalidateContext('test-id', { test: true });
// Should be a cache miss
await contextManager.getContext('test-id', { test: true });
expect(contextManager.stats.invalidations).toBe(1);
expect(contextManager.stats.misses).toBe(2);
});
});
// Wait for TTL to expire
await new Promise((resolve) => setTimeout(resolve, 1100));
describe('getStats', () => {
it('should return current cache statistics', async () => {
await contextManager.getContext('test-id', { test: true });
const stats = contextManager.getStats();
expect(stats.hits).toBe(0);
expect(stats.misses).toBe(1);
expect(stats.invalidations).toBe(0);
expect(stats.size).toBe(1);
expect(stats.maxSize).toBe(10);
expect(stats.ttl).toBe(1000);
});
});
});
// Should create new context
await contextManager.getContext('test-id', { test: true });
expect(contextManager.stats.misses).toBe(2);
expect(contextManager.stats.hits).toBe(0);
});
});
describe('updateContext', () => {
it('should update existing context metadata', async () => {
await contextManager.getContext('test-id', { initial: true });
const updated = await contextManager.updateContext('test-id', {
updated: true
});
expect(updated.metadata.initial).toBe(true);
expect(updated.metadata.updated).toBe(true);
});
});
describe('invalidateContext', () => {
it('should remove context from cache', async () => {
await contextManager.getContext('test-id', { test: true });
contextManager.invalidateContext('test-id', { test: true });
// Should be a cache miss
await contextManager.getContext('test-id', { test: true });
expect(contextManager.stats.invalidations).toBe(1);
expect(contextManager.stats.misses).toBe(2);
});
});
describe('getStats', () => {
it('should return current cache statistics', async () => {
await contextManager.getContext('test-id', { test: true });
const stats = contextManager.getStats();
expect(stats.hits).toBe(0);
expect(stats.misses).toBe(1);
expect(stats.invalidations).toBe(0);
expect(stats.size).toBe(1);
expect(stats.maxSize).toBe(10);
expect(stats.ttl).toBe(1000);
});
});
});

View File

@@ -15,156 +15,157 @@ import { LRUCache } from 'lru-cache';
*/
export class ContextManager {
/**
* Create a new ContextManager instance
* @param {ContextManagerConfig} config - Configuration options
*/
constructor(config = {}) {
this.config = {
maxCacheSize: config.maxCacheSize || 1000,
ttl: config.ttl || 1000 * 60 * 5, // 5 minutes default
maxContextSize: config.maxContextSize || 4000
};
/**
* Create a new ContextManager instance
* @param {ContextManagerConfig} config - Configuration options
*/
constructor(config = {}) {
this.config = {
maxCacheSize: config.maxCacheSize || 1000,
ttl: config.ttl || 1000 * 60 * 5, // 5 minutes default
maxContextSize: config.maxContextSize || 4000
};
// Initialize LRU cache for context data
this.cache = new LRUCache({
max: this.config.maxCacheSize,
ttl: this.config.ttl,
updateAgeOnGet: true
});
// Initialize LRU cache for context data
this.cache = new LRUCache({
max: this.config.maxCacheSize,
ttl: this.config.ttl,
updateAgeOnGet: true
});
// Cache statistics
this.stats = {
hits: 0,
misses: 0,
invalidations: 0
};
}
// Cache statistics
this.stats = {
hits: 0,
misses: 0,
invalidations: 0
};
}
/**
* Create a new context or retrieve from cache
* @param {string} contextId - Unique identifier for the context
* @param {Object} metadata - Additional metadata for the context
* @returns {Object} Context object with metadata
*/
async getContext(contextId, metadata = {}) {
const cacheKey = this._getCacheKey(contextId, metadata);
// Try to get from cache first
const cached = this.cache.get(cacheKey);
if (cached) {
this.stats.hits++;
return cached;
}
/**
* Create a new context or retrieve from cache
* @param {string} contextId - Unique identifier for the context
* @param {Object} metadata - Additional metadata for the context
* @returns {Object} Context object with metadata
*/
async getContext(contextId, metadata = {}) {
const cacheKey = this._getCacheKey(contextId, metadata);
this.stats.misses++;
// Create new context if not in cache
const context = {
id: contextId,
metadata: {
...metadata,
created: new Date().toISOString()
}
};
// Try to get from cache first
const cached = this.cache.get(cacheKey);
if (cached) {
this.stats.hits++;
return cached;
}
// Cache the new context
this.cache.set(cacheKey, context);
return context;
}
this.stats.misses++;
/**
* Update an existing context
* @param {string} contextId - Context identifier
* @param {Object} updates - Updates to apply to the context
* @returns {Object} Updated context
*/
async updateContext(contextId, updates) {
const context = await this.getContext(contextId);
// Apply updates to context
Object.assign(context.metadata, updates);
// Update cache
const cacheKey = this._getCacheKey(contextId, context.metadata);
this.cache.set(cacheKey, context);
return context;
}
// Create new context if not in cache
const context = {
id: contextId,
metadata: {
...metadata,
created: new Date().toISOString()
}
};
/**
* Invalidate a context in the cache
* @param {string} contextId - Context identifier
* @param {Object} metadata - Metadata used in the cache key
*/
invalidateContext(contextId, metadata = {}) {
const cacheKey = this._getCacheKey(contextId, metadata);
this.cache.delete(cacheKey);
this.stats.invalidations++;
}
// Cache the new context
this.cache.set(cacheKey, context);
/**
* Get cached data associated with a specific key.
* Increments cache hit stats if found.
* @param {string} key - The cache key.
* @returns {any | undefined} The cached data or undefined if not found/expired.
*/
getCachedData(key) {
const cached = this.cache.get(key);
if (cached !== undefined) { // Check for undefined specifically, as null/false might be valid cached values
this.stats.hits++;
return cached;
}
this.stats.misses++;
return undefined;
}
return context;
}
/**
* Set data in the cache with a specific key.
* @param {string} key - The cache key.
* @param {any} data - The data to cache.
*/
setCachedData(key, data) {
this.cache.set(key, data);
}
/**
* Update an existing context
* @param {string} contextId - Context identifier
* @param {Object} updates - Updates to apply to the context
* @returns {Object} Updated context
*/
async updateContext(contextId, updates) {
const context = await this.getContext(contextId);
/**
* Invalidate a specific cache key.
* Increments invalidation stats.
* @param {string} key - The cache key to invalidate.
*/
invalidateCacheKey(key) {
this.cache.delete(key);
this.stats.invalidations++;
}
// Apply updates to context
Object.assign(context.metadata, updates);
/**
* Get cache statistics
* @returns {Object} Cache statistics
*/
getStats() {
return {
hits: this.stats.hits,
misses: this.stats.misses,
invalidations: this.stats.invalidations,
size: this.cache.size,
maxSize: this.config.maxCacheSize,
ttl: this.config.ttl
};
}
// Update cache
const cacheKey = this._getCacheKey(contextId, context.metadata);
this.cache.set(cacheKey, context);
/**
* Generate a cache key from context ID and metadata
* @private
* @deprecated No longer used for direct cache key generation outside the manager.
* Prefer generating specific keys in calling functions.
*/
_getCacheKey(contextId, metadata) {
// Kept for potential backward compatibility or internal use if needed later.
return `${contextId}:${JSON.stringify(metadata)}`;
}
return context;
}
/**
* Invalidate a context in the cache
* @param {string} contextId - Context identifier
* @param {Object} metadata - Metadata used in the cache key
*/
invalidateContext(contextId, metadata = {}) {
const cacheKey = this._getCacheKey(contextId, metadata);
this.cache.delete(cacheKey);
this.stats.invalidations++;
}
/**
* Get cached data associated with a specific key.
* Increments cache hit stats if found.
* @param {string} key - The cache key.
* @returns {any | undefined} The cached data or undefined if not found/expired.
*/
getCachedData(key) {
const cached = this.cache.get(key);
if (cached !== undefined) {
// Check for undefined specifically, as null/false might be valid cached values
this.stats.hits++;
return cached;
}
this.stats.misses++;
return undefined;
}
/**
* Set data in the cache with a specific key.
* @param {string} key - The cache key.
* @param {any} data - The data to cache.
*/
setCachedData(key, data) {
this.cache.set(key, data);
}
/**
* Invalidate a specific cache key.
* Increments invalidation stats.
* @param {string} key - The cache key to invalidate.
*/
invalidateCacheKey(key) {
this.cache.delete(key);
this.stats.invalidations++;
}
/**
* Get cache statistics
* @returns {Object} Cache statistics
*/
getStats() {
return {
hits: this.stats.hits,
misses: this.stats.misses,
invalidations: this.stats.invalidations,
size: this.cache.size,
maxSize: this.config.maxCacheSize,
ttl: this.config.ttl
};
}
/**
* Generate a cache key from context ID and metadata
* @private
* @deprecated No longer used for direct cache key generation outside the manager.
* Prefer generating specific keys in calling functions.
*/
_getCacheKey(contextId, metadata) {
// Kept for potential backward compatibility or internal use if needed later.
return `${contextId}:${JSON.stringify(metadata)}`;
}
}
// Export a singleton instance with default config
export const contextManager = new ContextManager();
export const contextManager = new ContextManager();
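A minimal usage sketch of the cache surface defined above, exercised through the exported singleton. The import path, cache key, and payload are illustrative assumptions, not part of the diff.

```javascript
// Illustrative only: the key and payload are made up; the relative import
// path depends on where the caller lives (utils.js uses '../core/context-manager.js').
import { contextManager } from './context-manager.js';

const key = 'listTasks:/path/to/tasks.json:all:false'; // hypothetical cache key
const cached = contextManager.getCachedData(key); // records a hit or a miss

if (cached === undefined) {
  contextManager.setCachedData(key, { tasks: [] }); // cache a fresh result
}

contextManager.invalidateCacheKey(key); // bumps the invalidation counter
console.log(contextManager.getStats()); // { hits, misses, invalidations, size, maxSize, ttl }
```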

View File

@@ -1,7 +1,7 @@
/**
* task-master-core.js
* Direct function imports from Task Master modules
*
*
* This module provides direct access to Task Master core functions
* for improved performance and error handling compared to CLI execution.
*/
@@ -16,9 +16,9 @@ const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Import Task Master modules
import {
listTasks,
// We'll import more functions as we continue implementation
import {
listTasks
// We'll import more functions as we continue implementation
} from '../../../scripts/modules/task-manager.js';
// Import context manager
@@ -33,38 +33,40 @@ import { getCachedOrExecute } from '../tools/utils.js'; // Import the utility he
* @throws {Error} - If tasks.json cannot be found.
*/
function findTasksJsonPath(args, log) {
// Assume projectRoot is already normalized absolute path if passed in args
// Or use getProjectRoot if we decide to centralize that logic
const projectRoot = args.projectRoot || process.cwd();
log.info(`Searching for tasks.json within project root: ${projectRoot}`);
// Assume projectRoot is already normalized absolute path if passed in args
// Or use getProjectRoot if we decide to centralize that logic
const projectRoot = args.projectRoot || process.cwd();
log.info(`Searching for tasks.json within project root: ${projectRoot}`);
const possiblePaths = [];
const possiblePaths = [];
// 1. If a file is explicitly provided relative to projectRoot
if (args.file) {
possiblePaths.push(path.resolve(projectRoot, args.file));
}
// 1. If a file is explicitly provided relative to projectRoot
if (args.file) {
possiblePaths.push(path.resolve(projectRoot, args.file));
}
// 2. Check the standard locations relative to projectRoot
possiblePaths.push(
path.join(projectRoot, 'tasks.json'),
path.join(projectRoot, 'tasks', 'tasks.json')
);
// 2. Check the standard locations relative to projectRoot
possiblePaths.push(
path.join(projectRoot, 'tasks.json'),
path.join(projectRoot, 'tasks', 'tasks.json')
);
log.info(`Checking potential task file paths: ${possiblePaths.join(', ')}`);
log.info(`Checking potential task file paths: ${possiblePaths.join(', ')}`);
// Find the first existing path
for (const p of possiblePaths) {
if (fs.existsSync(p)) {
log.info(`Found tasks file at: ${p}`);
return p;
}
}
// Find the first existing path
for (const p of possiblePaths) {
if (fs.existsSync(p)) {
log.info(`Found tasks file at: ${p}`);
return p;
}
}
// If no file was found, throw an error
const error = new Error(`Tasks file not found in any of the expected locations relative to ${projectRoot}: ${possiblePaths.join(', ')}`);
error.code = 'TASKS_FILE_NOT_FOUND';
throw error;
// If no file was found, throw an error
const error = new Error(
`Tasks file not found in any of the expected locations relative to ${projectRoot}: ${possiblePaths.join(', ')}`
);
error.code = 'TASKS_FILE_NOT_FOUND';
throw error;
}
/**
@@ -75,60 +77,94 @@ function findTasksJsonPath(args, log) {
* @returns {Promise<Object>} - Task list result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }.
*/
export async function listTasksDirect(args, log) {
let tasksPath;
try {
// Find the tasks path first - needed for cache key and execution
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
if (error.code === 'TASKS_FILE_NOT_FOUND') {
log.error(`Tasks file not found: ${error.message}`);
// Return the error structure expected by the calling tool/handler
return { success: false, error: { code: error.code, message: error.message }, fromCache: false };
}
log.error(`Unexpected error finding tasks file: ${error.message}`);
// Re-throw for outer catch or return structured error
return { success: false, error: { code: 'FIND_TASKS_PATH_ERROR', message: error.message }, fromCache: false };
}
let tasksPath;
try {
// Find the tasks path first - needed for cache key and execution
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
if (error.code === 'TASKS_FILE_NOT_FOUND') {
log.error(`Tasks file not found: ${error.message}`);
// Return the error structure expected by the calling tool/handler
return {
success: false,
error: { code: error.code, message: error.message },
fromCache: false
};
}
log.error(`Unexpected error finding tasks file: ${error.message}`);
// Re-throw for outer catch or return structured error
return {
success: false,
error: { code: 'FIND_TASKS_PATH_ERROR', message: error.message },
fromCache: false
};
}
// Generate cache key *after* finding tasksPath
const statusFilter = args.status || 'all';
const withSubtasks = args.withSubtasks || false;
const cacheKey = `listTasks:${tasksPath}:${statusFilter}:${withSubtasks}`;
// Define the action function to be executed on cache miss
const coreListTasksAction = async () => {
try {
log.info(`Executing core listTasks function for path: ${tasksPath}, filter: ${statusFilter}, subtasks: ${withSubtasks}`);
const resultData = listTasks(tasksPath, statusFilter, withSubtasks, 'json');
// Generate cache key *after* finding tasksPath
const statusFilter = args.status || 'all';
const withSubtasks = args.withSubtasks || false;
const cacheKey = `listTasks:${tasksPath}:${statusFilter}:${withSubtasks}`;
if (!resultData || !resultData.tasks) {
log.error('Invalid or empty response from listTasks core function');
return { success: false, error: { code: 'INVALID_CORE_RESPONSE', message: 'Invalid or empty response from listTasks core function' } };
}
log.info(`Core listTasks function retrieved ${resultData.tasks.length} tasks`);
return { success: true, data: resultData };
// Define the action function to be executed on cache miss
const coreListTasksAction = async () => {
try {
log.info(
`Executing core listTasks function for path: ${tasksPath}, filter: ${statusFilter}, subtasks: ${withSubtasks}`
);
const resultData = listTasks(
tasksPath,
statusFilter,
withSubtasks,
'json'
);
} catch (error) {
log.error(`Core listTasks function failed: ${error.message}`);
return { success: false, error: { code: 'LIST_TASKS_CORE_ERROR', message: error.message || 'Failed to list tasks' } };
}
};
if (!resultData || !resultData.tasks) {
log.error('Invalid or empty response from listTasks core function');
return {
success: false,
error: {
code: 'INVALID_CORE_RESPONSE',
message: 'Invalid or empty response from listTasks core function'
}
};
}
log.info(
`Core listTasks function retrieved ${resultData.tasks.length} tasks`
);
return { success: true, data: resultData };
} catch (error) {
log.error(`Core listTasks function failed: ${error.message}`);
return {
success: false,
error: {
code: 'LIST_TASKS_CORE_ERROR',
message: error.message || 'Failed to list tasks'
}
};
}
};
// Use the caching utility
try {
const result = await getCachedOrExecute({
cacheKey,
actionFn: coreListTasksAction,
log
});
log.info(`listTasksDirect completed. From cache: ${result.fromCache}`);
return result; // Returns { success, data/error, fromCache }
} catch(error) {
// Catch unexpected errors from getCachedOrExecute itself (though unlikely)
log.error(`Unexpected error during getCachedOrExecute for listTasks: ${error.message}`);
console.error(error.stack);
return { success: false, error: { code: 'CACHE_UTIL_ERROR', message: error.message }, fromCache: false };
}
// Use the caching utility
try {
const result = await getCachedOrExecute({
cacheKey,
actionFn: coreListTasksAction,
log
});
log.info(`listTasksDirect completed. From cache: ${result.fromCache}`);
return result; // Returns { success, data/error, fromCache }
} catch (error) {
// Catch unexpected errors from getCachedOrExecute itself (though unlikely)
log.error(
`Unexpected error during getCachedOrExecute for listTasks: ${error.message}`
);
console.error(error.stack);
return {
success: false,
error: { code: 'CACHE_UTIL_ERROR', message: error.message },
fromCache: false
};
}
}
/**
@@ -138,30 +174,30 @@ export async function listTasksDirect(args, log) {
* @returns {Object} - Cache statistics
*/
export async function getCacheStatsDirect(args, log) {
try {
log.info('Retrieving cache statistics');
const stats = contextManager.getStats();
return {
success: true,
data: stats
};
} catch (error) {
log.error(`Error getting cache stats: ${error.message}`);
return {
success: false,
error: {
code: 'CACHE_STATS_ERROR',
message: error.message || 'Unknown error occurred'
}
};
}
try {
log.info('Retrieving cache statistics');
const stats = contextManager.getStats();
return {
success: true,
data: stats
};
} catch (error) {
log.error(`Error getting cache stats: ${error.message}`);
return {
success: false,
error: {
code: 'CACHE_STATS_ERROR',
message: error.message || 'Unknown error occurred'
}
};
}
}
/**
* Maps Task Master functions to their direct implementation
*/
export const directFunctions = {
list: listTasksDirect,
cacheStats: getCacheStatsDirect,
// Add more functions as we implement them
};
list: listTasksDirect,
cacheStats: getCacheStatsDirect
// Add more functions as we implement them
};
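To make the `{ success, data | error, fromCache }` contract above concrete, here is a hedged sketch of calling `listTasksDirect` directly. The console-based logger and the argument values are assumptions for illustration.

```javascript
// Hypothetical caller for listTasksDirect; logger shape and args are illustrative.
import { listTasksDirect } from './task-master-core.js';

const log = {
  info: (msg) => console.log(msg),
  warn: (msg) => console.warn(msg),
  error: (msg) => console.error(msg)
};

const result = await listTasksDirect(
  { projectRoot: process.cwd(), status: 'pending', withSubtasks: true },
  log
);

if (result.success) {
  console.log(`Got ${result.data.tasks.length} tasks (fromCache: ${result.fromCache})`);
} else {
  console.error(`${result.error.code}: ${result.error.message}`);
}
```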

View File

@@ -1,10 +1,10 @@
import { FastMCP } from "fastmcp";
import path from "path";
import dotenv from "dotenv";
import { fileURLToPath } from "url";
import fs from "fs";
import logger from "./logger.js";
import { registerTaskMasterTools } from "./tools/index.js";
import { FastMCP } from 'fastmcp';
import path from 'path';
import dotenv from 'dotenv';
import { fileURLToPath } from 'url';
import fs from 'fs';
import logger from './logger.js';
import { registerTaskMasterTools } from './tools/index.js';
// Load environment variables
dotenv.config();
@@ -17,70 +17,70 @@ const __dirname = path.dirname(__filename);
* Main MCP server class that integrates with Task Master
*/
class TaskMasterMCPServer {
constructor() {
// Get version from package.json using synchronous fs
const packagePath = path.join(__dirname, "../../package.json");
const packageJson = JSON.parse(fs.readFileSync(packagePath, "utf8"));
constructor() {
// Get version from package.json using synchronous fs
const packagePath = path.join(__dirname, '../../package.json');
const packageJson = JSON.parse(fs.readFileSync(packagePath, 'utf8'));
this.options = {
name: "Task Master MCP Server",
version: packageJson.version,
};
this.options = {
name: 'Task Master MCP Server',
version: packageJson.version
};
this.server = new FastMCP(this.options);
this.initialized = false;
this.server = new FastMCP(this.options);
this.initialized = false;
// this.server.addResource({});
// this.server.addResource({});
// this.server.addResourceTemplate({});
// this.server.addResourceTemplate({});
// Bind methods
this.init = this.init.bind(this);
this.start = this.start.bind(this);
this.stop = this.stop.bind(this);
// Bind methods
this.init = this.init.bind(this);
this.start = this.start.bind(this);
this.stop = this.stop.bind(this);
// Setup logging
this.logger = logger;
}
// Setup logging
this.logger = logger;
}
/**
* Initialize the MCP server with necessary tools and routes
*/
async init() {
if (this.initialized) return;
/**
* Initialize the MCP server with necessary tools and routes
*/
async init() {
if (this.initialized) return;
// Register Task Master tools
registerTaskMasterTools(this.server);
// Register Task Master tools
registerTaskMasterTools(this.server);
this.initialized = true;
this.initialized = true;
return this;
}
return this;
}
/**
* Start the MCP server
*/
async start() {
if (!this.initialized) {
await this.init();
}
/**
* Start the MCP server
*/
async start() {
if (!this.initialized) {
await this.init();
}
// Start the FastMCP server
await this.server.start({
transportType: "stdio",
});
// Start the FastMCP server
await this.server.start({
transportType: 'stdio'
});
return this;
}
return this;
}
/**
* Stop the MCP server
*/
async stop() {
if (this.server) {
await this.server.stop();
}
}
/**
* Stop the MCP server
*/
async stop() {
if (this.server) {
await this.server.stop();
}
}
}
export default TaskMasterMCPServer;
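A hedged sketch of how this class is typically driven from an entry point; the import path, error handling, and signal hook are assumptions, not a copy of the real `mcp-server/server.js`.

```javascript
// Illustrative startup flow for the class above; the './index.js' path is assumed.
import TaskMasterMCPServer from './index.js';

const server = new TaskMasterMCPServer();

try {
  await server.start(); // start() calls init() on first run, then serves MCP over stdio
} catch (error) {
  console.error(`Failed to start MCP server: ${error.message}`);
  process.exit(1);
}

// Stop cleanly on Ctrl+C (assumed convenience, not shown in the diff)
process.on('SIGINT', async () => {
  await server.stop();
  process.exit(0);
});
```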

View File

@@ -1,18 +1,18 @@
import chalk from "chalk";
import chalk from 'chalk';
// Define log levels
const LOG_LEVELS = {
debug: 0,
info: 1,
warn: 2,
error: 3,
success: 4,
debug: 0,
info: 1,
warn: 2,
error: 3,
success: 4
};
// Get log level from environment or default to info
const LOG_LEVEL = process.env.LOG_LEVEL
? LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()]
: LOG_LEVELS.info;
? LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()]
: LOG_LEVELS.info;
/**
* Logs a message with the specified level
@@ -20,29 +20,29 @@ const LOG_LEVEL = process.env.LOG_LEVEL
* @param {...any} args - Arguments to log
*/
function log(level, ...args) {
const icons = {
debug: chalk.gray("🔍"),
info: chalk.blue(""),
warn: chalk.yellow("⚠️"),
error: chalk.red("❌"),
success: chalk.green("✅"),
};
const icons = {
debug: chalk.gray('🔍'),
info: chalk.blue(''),
warn: chalk.yellow('⚠️'),
error: chalk.red('❌'),
success: chalk.green('✅')
};
if (LOG_LEVELS[level] >= LOG_LEVEL) {
const icon = icons[level] || "";
if (LOG_LEVELS[level] >= LOG_LEVEL) {
const icon = icons[level] || '';
if (level === "error") {
console.error(icon, chalk.red(...args));
} else if (level === "warn") {
console.warn(icon, chalk.yellow(...args));
} else if (level === "success") {
console.log(icon, chalk.green(...args));
} else if (level === "info") {
console.log(icon, chalk.blue(...args));
} else {
console.log(icon, ...args);
}
}
if (level === 'error') {
console.error(icon, chalk.red(...args));
} else if (level === 'warn') {
console.warn(icon, chalk.yellow(...args));
} else if (level === 'success') {
console.log(icon, chalk.green(...args));
} else if (level === 'info') {
console.log(icon, chalk.blue(...args));
} else {
console.log(icon, ...args);
}
}
}
/**
@@ -51,14 +51,14 @@ function log(level, ...args) {
* @returns {Object} Logger object with info, error, debug, warn, and success methods
*/
export function createLogger() {
return {
debug: (message) => log("debug", message),
info: (message) => log("info", message),
warn: (message) => log("warn", message),
error: (message) => log("error", message),
success: (message) => log("success", message),
log: log, // Also expose the raw log function
};
return {
debug: (message) => log('debug', message),
info: (message) => log('info', message),
warn: (message) => log('warn', message),
error: (message) => log('error', message),
success: (message) => log('success', message),
log: log // Also expose the raw log function
};
}
// Export a default logger instance
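A short illustrative use of the factory above; which messages print is controlled by the `LOG_LEVEL` environment variable (default: info). The import path is assumed.

```javascript
// Illustrative only: exercises the logger levels defined above.
import { createLogger } from './logger.js';

const log = createLogger();

log.debug('Not shown unless LOG_LEVEL=debug');
log.info('Server starting');
log.warn('Cache miss, falling back to direct execution');
log.error('Something went wrong');
log.success('Task completed');
```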

View File

@@ -3,64 +3,64 @@
* Tool to add a new task using AI
*/
import { z } from "zod";
import { z } from 'zod';
import {
executeTaskMasterCommand,
createContentResponse,
createErrorResponse,
} from "./utils.js";
executeTaskMasterCommand,
createContentResponse,
createErrorResponse
} from './utils.js';
/**
* Register the addTask tool with the MCP server
* @param {FastMCP} server - FastMCP server instance
*/
export function registerAddTaskTool(server) {
server.addTool({
name: "addTask",
description: "Add a new task using AI",
parameters: z.object({
prompt: z.string().describe("Description of the task to add"),
dependencies: z
.string()
.optional()
.describe("Comma-separated list of task IDs this task depends on"),
priority: z
.string()
.optional()
.describe("Task priority (high, medium, low)"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.describe(
"Root directory of the project (default: current working directory)"
),
}),
execute: async (args, { log }) => {
try {
log.info(`Adding new task: ${args.prompt}`);
server.addTool({
name: 'addTask',
description: 'Add a new task using AI',
parameters: z.object({
prompt: z.string().describe('Description of the task to add'),
dependencies: z
.string()
.optional()
.describe('Comma-separated list of task IDs this task depends on'),
priority: z
.string()
.optional()
.describe('Task priority (high, medium, low)'),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log }) => {
try {
log.info(`Adding new task: ${args.prompt}`);
const cmdArgs = [`--prompt="${args.prompt}"`];
if (args.dependencies)
cmdArgs.push(`--dependencies=${args.dependencies}`);
if (args.priority) cmdArgs.push(`--priority=${args.priority}`);
if (args.file) cmdArgs.push(`--file=${args.file}`);
const cmdArgs = [`--prompt="${args.prompt}"`];
if (args.dependencies)
cmdArgs.push(`--dependencies=${args.dependencies}`);
if (args.priority) cmdArgs.push(`--priority=${args.priority}`);
if (args.file) cmdArgs.push(`--file=${args.file}`);
const result = executeTaskMasterCommand(
"add-task",
log,
cmdArgs,
projectRoot
);
const result = executeTaskMasterCommand(
'add-task',
log,
cmdArgs,
projectRoot
);
if (!result.success) {
throw new Error(result.error);
}
if (!result.success) {
throw new Error(result.error);
}
return createContentResponse(result.stdout);
} catch (error) {
log.error(`Error adding task: ${error.message}`);
return createErrorResponse(`Error adding task: ${error.message}`);
}
},
});
return createContentResponse(result.stdout);
} catch (error) {
log.error(`Error adding task: ${error.message}`);
return createErrorResponse(`Error adding task: ${error.message}`);
}
}
});
}

View File

@@ -3,76 +3,76 @@
* Tool to break down a task into detailed subtasks
*/
import { z } from "zod";
import { z } from 'zod';
import {
executeTaskMasterCommand,
createContentResponse,
createErrorResponse,
} from "./utils.js";
executeTaskMasterCommand,
createContentResponse,
createErrorResponse
} from './utils.js';
/**
* Register the expandTask tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerExpandTaskTool(server) {
server.addTool({
name: "expandTask",
description: "Break down a task into detailed subtasks",
parameters: z.object({
id: z.string().describe("Task ID to expand"),
num: z.number().optional().describe("Number of subtasks to generate"),
research: z
.boolean()
.optional()
.describe(
"Enable Perplexity AI for research-backed subtask generation"
),
prompt: z
.string()
.optional()
.describe("Additional context to guide subtask generation"),
force: z
.boolean()
.optional()
.describe(
"Force regeneration of subtasks for tasks that already have them"
),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.describe(
"Root directory of the project (default: current working directory)"
),
}),
execute: async (args, { log }) => {
try {
log.info(`Expanding task ${args.id}`);
server.addTool({
name: 'expandTask',
description: 'Break down a task into detailed subtasks',
parameters: z.object({
id: z.string().describe('Task ID to expand'),
num: z.number().optional().describe('Number of subtasks to generate'),
research: z
.boolean()
.optional()
.describe(
'Enable Perplexity AI for research-backed subtask generation'
),
prompt: z
.string()
.optional()
.describe('Additional context to guide subtask generation'),
force: z
.boolean()
.optional()
.describe(
'Force regeneration of subtasks for tasks that already have them'
),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log }) => {
try {
log.info(`Expanding task ${args.id}`);
const cmdArgs = [`--id=${args.id}`];
if (args.num) cmdArgs.push(`--num=${args.num}`);
if (args.research) cmdArgs.push("--research");
if (args.prompt) cmdArgs.push(`--prompt="${args.prompt}"`);
if (args.force) cmdArgs.push("--force");
if (args.file) cmdArgs.push(`--file=${args.file}`);
const cmdArgs = [`--id=${args.id}`];
if (args.num) cmdArgs.push(`--num=${args.num}`);
if (args.research) cmdArgs.push('--research');
if (args.prompt) cmdArgs.push(`--prompt="${args.prompt}"`);
if (args.force) cmdArgs.push('--force');
if (args.file) cmdArgs.push(`--file=${args.file}`);
const projectRoot = args.projectRoot;
const projectRoot = args.projectRoot;
const result = executeTaskMasterCommand(
"expand",
log,
cmdArgs,
projectRoot
);
const result = executeTaskMasterCommand(
'expand',
log,
cmdArgs,
projectRoot
);
if (!result.success) {
throw new Error(result.error);
}
if (!result.success) {
throw new Error(result.error);
}
return createContentResponse(result.stdout);
} catch (error) {
log.error(`Error expanding task: ${error.message}`);
return createErrorResponse(`Error expanding task: ${error.message}`);
}
},
});
return createContentResponse(result.stdout);
} catch (error) {
log.error(`Error expanding task: ${error.message}`);
return createErrorResponse(`Error expanding task: ${error.message}`);
}
}
});
}

View File

@@ -3,27 +3,27 @@
* Export all Task Master CLI tools for MCP server
*/
import logger from "../logger.js";
import { registerListTasksTool } from "./listTasks.js";
import { registerShowTaskTool } from "./showTask.js";
import { registerSetTaskStatusTool } from "./setTaskStatus.js";
import { registerExpandTaskTool } from "./expandTask.js";
import { registerNextTaskTool } from "./nextTask.js";
import { registerAddTaskTool } from "./addTask.js";
import logger from '../logger.js';
import { registerListTasksTool } from './listTasks.js';
import { registerShowTaskTool } from './showTask.js';
import { registerSetTaskStatusTool } from './setTaskStatus.js';
import { registerExpandTaskTool } from './expandTask.js';
import { registerNextTaskTool } from './nextTask.js';
import { registerAddTaskTool } from './addTask.js';
/**
* Register all Task Master tools with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerTaskMasterTools(server) {
registerListTasksTool(server);
registerShowTaskTool(server);
registerSetTaskStatusTool(server);
registerExpandTaskTool(server);
registerNextTaskTool(server);
registerAddTaskTool(server);
registerListTasksTool(server);
registerShowTaskTool(server);
registerSetTaskStatusTool(server);
registerExpandTaskTool(server);
registerNextTaskTool(server);
registerAddTaskTool(server);
}
export default {
registerTaskMasterTools,
registerTaskMasterTools
};

View File

@@ -3,51 +3,50 @@
* Tool to list all tasks from Task Master
*/
import { z } from "zod";
import {
createErrorResponse,
handleApiResult
} from "./utils.js";
import { listTasksDirect } from "../core/task-master-core.js";
import { z } from 'zod';
import { createErrorResponse, handleApiResult } from './utils.js';
import { listTasksDirect } from '../core/task-master-core.js';
/**
* Register the listTasks tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerListTasksTool(server) {
server.addTool({
name: "listTasks",
description: "List all tasks from Task Master",
parameters: z.object({
status: z.string().optional().describe("Filter tasks by status"),
withSubtasks: z
.boolean()
.optional()
.describe("Include subtasks in the response"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.optional()
.describe(
"Root directory of the project (default: current working directory)"
),
}),
execute: async (args, { log }) => {
try {
log.info(`Listing tasks with filters: ${JSON.stringify(args)}`);
// Call core function - args contains projectRoot which is handled internally
const result = await listTasksDirect(args, log);
// Log result and use handleApiResult utility
log.info(`Retrieved ${result.success ? (result.data?.tasks?.length || 0) : 0} tasks`);
return handleApiResult(result, log, 'Error listing tasks');
} catch (error) {
log.error(`Error listing tasks: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
server.addTool({
name: 'listTasks',
description: 'List all tasks from Task Master',
parameters: z.object({
status: z.string().optional().describe('Filter tasks by status'),
withSubtasks: z
.boolean()
.optional()
.describe('Include subtasks in the response'),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log }) => {
try {
log.info(`Listing tasks with filters: ${JSON.stringify(args)}`);
// Call core function - args contains projectRoot which is handled internally
const result = await listTasksDirect(args, log);
// Log result and use handleApiResult utility
log.info(
`Retrieved ${result.success ? result.data?.tasks?.length || 0 : 0} tasks`
);
return handleApiResult(result, log, 'Error listing tasks');
} catch (error) {
log.error(`Error listing tasks: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}
// We no longer need the formatTasksResponse function as we're returning raw JSON data
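For reference, a hedged sketch of the payload this tool now hands back to the MCP client once `handleApiResult` and `createContentResponse` have wrapped the raw data; the task fields shown are invented examples.

```javascript
// Approximate response shape produced by handleApiResult -> createContentResponse.
// Field values are examples only.
const exampleListTasksResponse = {
  content: [
    {
      type: 'text',
      text: JSON.stringify(
        {
          fromCache: false,
          data: {
            tasks: [{ id: 1, title: 'Example task', status: 'pending' }]
          }
        },
        null,
        2
      )
    }
  ]
};
```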

View File

@@ -3,55 +3,55 @@
* Tool to show the next task to work on based on dependencies and status
*/
import { z } from "zod";
import { z } from 'zod';
import {
executeTaskMasterCommand,
createContentResponse,
createErrorResponse,
} from "./utils.js";
executeTaskMasterCommand,
createContentResponse,
createErrorResponse
} from './utils.js';
/**
* Register the nextTask tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerNextTaskTool(server) {
server.addTool({
name: "nextTask",
description:
"Show the next task to work on based on dependencies and status",
parameters: z.object({
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.describe(
"Root directory of the project (default: current working directory)"
),
}),
execute: async (args, { log }) => {
try {
log.info(`Finding next task to work on`);
server.addTool({
name: 'nextTask',
description:
'Show the next task to work on based on dependencies and status',
parameters: z.object({
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log }) => {
try {
log.info(`Finding next task to work on`);
const cmdArgs = [];
if (args.file) cmdArgs.push(`--file=${args.file}`);
const cmdArgs = [];
if (args.file) cmdArgs.push(`--file=${args.file}`);
const projectRoot = args.projectRoot;
const projectRoot = args.projectRoot;
const result = executeTaskMasterCommand(
"next",
log,
cmdArgs,
projectRoot
);
const result = executeTaskMasterCommand(
'next',
log,
cmdArgs,
projectRoot
);
if (!result.success) {
throw new Error(result.error);
}
if (!result.success) {
throw new Error(result.error);
}
return createContentResponse(result.stdout);
} catch (error) {
log.error(`Error finding next task: ${error.message}`);
return createErrorResponse(`Error finding next task: ${error.message}`);
}
},
});
return createContentResponse(result.stdout);
} catch (error) {
log.error(`Error finding next task: ${error.message}`);
return createErrorResponse(`Error finding next task: ${error.message}`);
}
}
});
}

View File

@@ -3,62 +3,62 @@
* Tool to set the status of a task
*/
import { z } from "zod";
import { z } from 'zod';
import {
executeTaskMasterCommand,
createContentResponse,
createErrorResponse,
} from "./utils.js";
executeTaskMasterCommand,
createContentResponse,
createErrorResponse
} from './utils.js';
/**
* Register the setTaskStatus tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerSetTaskStatusTool(server) {
server.addTool({
name: "setTaskStatus",
description: "Set the status of a task",
parameters: z.object({
id: z
.string()
.describe("Task ID (can be comma-separated for multiple tasks)"),
status: z
.string()
.describe("New status (todo, in-progress, review, done)"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.describe(
"Root directory of the project (default: current working directory)"
),
}),
execute: async (args, { log }) => {
try {
log.info(`Setting status of task(s) ${args.id} to: ${args.status}`);
server.addTool({
name: 'setTaskStatus',
description: 'Set the status of a task',
parameters: z.object({
id: z
.string()
.describe('Task ID (can be comma-separated for multiple tasks)'),
status: z
.string()
.describe('New status (todo, in-progress, review, done)'),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log }) => {
try {
log.info(`Setting status of task(s) ${args.id} to: ${args.status}`);
const cmdArgs = [`--id=${args.id}`, `--status=${args.status}`];
if (args.file) cmdArgs.push(`--file=${args.file}`);
const cmdArgs = [`--id=${args.id}`, `--status=${args.status}`];
if (args.file) cmdArgs.push(`--file=${args.file}`);
const projectRoot = args.projectRoot;
const projectRoot = args.projectRoot;
const result = executeTaskMasterCommand(
"set-status",
log,
cmdArgs,
projectRoot
);
const result = executeTaskMasterCommand(
'set-status',
log,
cmdArgs,
projectRoot
);
if (!result.success) {
throw new Error(result.error);
}
if (!result.success) {
throw new Error(result.error);
}
return createContentResponse(result.stdout);
} catch (error) {
log.error(`Error setting task status: ${error.message}`);
return createErrorResponse(
`Error setting task status: ${error.message}`
);
}
},
});
return createContentResponse(result.stdout);
} catch (error) {
log.error(`Error setting task status: ${error.message}`);
return createErrorResponse(
`Error setting task status: ${error.message}`
);
}
}
});
}

View File

@@ -3,76 +3,80 @@
* Tool to show detailed information about a specific task
*/
import { z } from "zod";
import { z } from 'zod';
import {
executeTaskMasterCommand,
createErrorResponse,
handleApiResult
} from "./utils.js";
executeTaskMasterCommand,
createErrorResponse,
handleApiResult
} from './utils.js';
/**
* Register the showTask tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerShowTaskTool(server) {
server.addTool({
name: "showTask",
description: "Show detailed information about a specific task",
parameters: z.object({
id: z.string().describe("Task ID to show"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.optional()
.describe(
"Root directory of the project (default: current working directory)"
),
}),
execute: async (args, { log }) => {
try {
log.info(`Showing task details for ID: ${args.id}`);
server.addTool({
name: 'showTask',
description: 'Show detailed information about a specific task',
parameters: z.object({
id: z.string().describe('Task ID to show'),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log }) => {
try {
log.info(`Showing task details for ID: ${args.id}`);
// Prepare arguments for CLI command
const cmdArgs = [`--id=${args.id}`];
if (args.file) cmdArgs.push(`--file=${args.file}`);
// Prepare arguments for CLI command
const cmdArgs = [`--id=${args.id}`];
if (args.file) cmdArgs.push(`--file=${args.file}`);
// Execute the command - function now handles project root internally
const result = executeTaskMasterCommand(
"show",
log,
cmdArgs,
args.projectRoot // Pass raw project root, function will normalize it
);
// Execute the command - function now handles project root internally
const result = executeTaskMasterCommand(
'show',
log,
cmdArgs,
args.projectRoot // Pass raw project root, function will normalize it
);
// Process CLI result into API result format for handleApiResult
if (result.success) {
try {
// Try to parse response as JSON
const data = JSON.parse(result.stdout);
// Return equivalent of a successful API call with data
return handleApiResult({ success: true, data }, log, 'Error showing task');
} catch (e) {
// If parsing fails, still return success but with raw string data
return handleApiResult(
{ success: true, data: result.stdout },
log,
'Error showing task',
// Skip data processing for string data
null
);
}
} else {
// Return equivalent of a failed API call
return handleApiResult(
{ success: false, error: { message: result.error } },
log,
'Error showing task'
);
}
} catch (error) {
log.error(`Error showing task: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
// Process CLI result into API result format for handleApiResult
if (result.success) {
try {
// Try to parse response as JSON
const data = JSON.parse(result.stdout);
// Return equivalent of a successful API call with data
return handleApiResult(
{ success: true, data },
log,
'Error showing task'
);
} catch (e) {
// If parsing fails, still return success but with raw string data
return handleApiResult(
{ success: true, data: result.stdout },
log,
'Error showing task',
// Skip data processing for string data
null
);
}
} else {
// Return equivalent of a failed API call
return handleApiResult(
{ success: false, error: { message: result.error } },
log,
'Error showing task'
);
}
} catch (error) {
log.error(`Error showing task: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File

@@ -3,27 +3,27 @@
* Utility functions for Task Master CLI integration
*/
import { spawnSync } from "child_process";
import path from "path";
import { spawnSync } from 'child_process';
import path from 'path';
import { contextManager } from '../core/context-manager.js'; // Import the singleton
/**
* Get normalized project root path
* Get normalized project root path
* @param {string|undefined} projectRootRaw - Raw project root from arguments
* @param {Object} log - Logger object
* @returns {string} - Normalized absolute path to project root
*/
export function getProjectRoot(projectRootRaw, log) {
// Make sure projectRoot is set
const rootPath = projectRootRaw || process.cwd();
// Ensure projectRoot is absolute
const projectRoot = path.isAbsolute(rootPath)
? rootPath
: path.resolve(process.cwd(), rootPath);
log.info(`Using project root: ${projectRoot}`);
return projectRoot;
// Make sure projectRoot is set
const rootPath = projectRootRaw || process.cwd();
// Ensure projectRoot is absolute
const projectRoot = path.isAbsolute(rootPath)
? rootPath
: path.resolve(process.cwd(), rootPath);
log.info(`Using project root: ${projectRoot}`);
return projectRoot;
}
/**
@@ -34,28 +34,35 @@ export function getProjectRoot(projectRootRaw, log) {
* @param {Function} processFunction - Optional function to process successful result data
* @returns {Object} - Standardized MCP response object
*/
export function handleApiResult(result, log, errorPrefix = 'API error', processFunction = processMCPResponseData) {
if (!result.success) {
const errorMsg = result.error?.message || `Unknown ${errorPrefix}`;
// Include cache status in error logs
log.error(`${errorPrefix}: ${errorMsg}. From cache: ${result.fromCache}`); // Keep logging cache status on error
return createErrorResponse(errorMsg);
}
// Process the result data if needed
const processedData = processFunction ? processFunction(result.data) : result.data;
// Log success including cache status
log.info(`Successfully completed operation. From cache: ${result.fromCache}`); // Add success log with cache status
export function handleApiResult(
result,
log,
errorPrefix = 'API error',
processFunction = processMCPResponseData
) {
if (!result.success) {
const errorMsg = result.error?.message || `Unknown ${errorPrefix}`;
// Include cache status in error logs
log.error(`${errorPrefix}: ${errorMsg}. From cache: ${result.fromCache}`); // Keep logging cache status on error
return createErrorResponse(errorMsg);
}
// Create the response payload including the fromCache flag
const responsePayload = {
fromCache: result.fromCache, // Get the flag from the original 'result'
data: processedData // Nest the processed data under a 'data' key
};
// Pass this combined payload to createContentResponse
return createContentResponse(responsePayload);
// Process the result data if needed
const processedData = processFunction
? processFunction(result.data)
: result.data;
// Log success including cache status
log.info(`Successfully completed operation. From cache: ${result.fromCache}`); // Add success log with cache status
// Create the response payload including the fromCache flag
const responsePayload = {
fromCache: result.fromCache, // Get the flag from the original 'result'
data: processedData // Nest the processed data under a 'data' key
};
// Pass this combined payload to createContentResponse
return createContentResponse(responsePayload);
}
/**
@@ -67,68 +74,68 @@ export function handleApiResult(result, log, errorPrefix = 'API error', processF
* @returns {Object} - The result of the command execution
*/
export function executeTaskMasterCommand(
command,
log,
args = [],
projectRootRaw = null
command,
log,
args = [],
projectRootRaw = null
) {
try {
// Normalize project root internally using the getProjectRoot utility
const cwd = getProjectRoot(projectRootRaw, log);
try {
// Normalize project root internally using the getProjectRoot utility
const cwd = getProjectRoot(projectRootRaw, log);
log.info(
`Executing task-master ${command} with args: ${JSON.stringify(
args
)} in directory: ${cwd}`
);
log.info(
`Executing task-master ${command} with args: ${JSON.stringify(
args
)} in directory: ${cwd}`
);
// Prepare full arguments array
const fullArgs = [command, ...args];
// Prepare full arguments array
const fullArgs = [command, ...args];
// Common options for spawn
const spawnOptions = {
encoding: "utf8",
cwd: cwd,
};
// Common options for spawn
const spawnOptions = {
encoding: 'utf8',
cwd: cwd
};
// Execute the command using the global task-master CLI or local script
// Try the global CLI first
let result = spawnSync("task-master", fullArgs, spawnOptions);
// Execute the command using the global task-master CLI or local script
// Try the global CLI first
let result = spawnSync('task-master', fullArgs, spawnOptions);
// If global CLI is not available, try fallback to the local script
if (result.error && result.error.code === "ENOENT") {
log.info("Global task-master not found, falling back to local script");
result = spawnSync("node", ["scripts/dev.js", ...fullArgs], spawnOptions);
}
// If global CLI is not available, try fallback to the local script
if (result.error && result.error.code === 'ENOENT') {
log.info('Global task-master not found, falling back to local script');
result = spawnSync('node', ['scripts/dev.js', ...fullArgs], spawnOptions);
}
if (result.error) {
throw new Error(`Command execution error: ${result.error.message}`);
}
if (result.error) {
throw new Error(`Command execution error: ${result.error.message}`);
}
if (result.status !== 0) {
// Improve error handling by combining stderr and stdout if stderr is empty
const errorOutput = result.stderr
? result.stderr.trim()
: result.stdout
? result.stdout.trim()
: "Unknown error";
throw new Error(
`Command failed with exit code ${result.status}: ${errorOutput}`
);
}
if (result.status !== 0) {
// Improve error handling by combining stderr and stdout if stderr is empty
const errorOutput = result.stderr
? result.stderr.trim()
: result.stdout
? result.stdout.trim()
: 'Unknown error';
throw new Error(
`Command failed with exit code ${result.status}: ${errorOutput}`
);
}
return {
success: true,
stdout: result.stdout,
stderr: result.stderr,
};
} catch (error) {
log.error(`Error executing task-master command: ${error.message}`);
return {
success: false,
error: error.message,
};
}
return {
success: true,
stdout: result.stdout,
stderr: result.stderr
};
} catch (error) {
log.error(`Error executing task-master command: ${error.message}`);
return {
success: false,
error: error.message
};
}
}
/**
@@ -144,40 +151,44 @@ export function executeTaskMasterCommand(
* Format: { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
*/
export async function getCachedOrExecute({ cacheKey, actionFn, log }) {
// Check cache first
const cachedResult = contextManager.getCachedData(cacheKey);
if (cachedResult !== undefined) {
log.info(`Cache hit for key: ${cacheKey}`);
// Return the cached data in the same structure as a fresh result
return {
...cachedResult, // Spread the cached result to maintain its structure
fromCache: true // Just add the fromCache flag
};
}
// Check cache first
const cachedResult = contextManager.getCachedData(cacheKey);
log.info(`Cache miss for key: ${cacheKey}. Executing action function.`);
// Execute the action function if cache missed
const result = await actionFn();
// If the action was successful, cache the result (but without fromCache flag)
if (result.success && result.data !== undefined) {
log.info(`Action successful. Caching result for key: ${cacheKey}`);
// Cache the entire result structure (minus the fromCache flag)
const { fromCache, ...resultToCache } = result;
contextManager.setCachedData(cacheKey, resultToCache);
} else if (!result.success) {
log.warn(`Action failed for cache key ${cacheKey}. Result not cached. Error: ${result.error?.message}`);
} else {
log.warn(`Action for cache key ${cacheKey} succeeded but returned no data. Result not cached.`);
}
// Return the fresh result, indicating it wasn't from cache
return {
...result,
fromCache: false
};
if (cachedResult !== undefined) {
log.info(`Cache hit for key: ${cacheKey}`);
// Return the cached data in the same structure as a fresh result
return {
...cachedResult, // Spread the cached result to maintain its structure
fromCache: true // Just add the fromCache flag
};
}
log.info(`Cache miss for key: ${cacheKey}. Executing action function.`);
// Execute the action function if cache missed
const result = await actionFn();
// If the action was successful, cache the result (but without fromCache flag)
if (result.success && result.data !== undefined) {
log.info(`Action successful. Caching result for key: ${cacheKey}`);
// Cache the entire result structure (minus the fromCache flag)
const { fromCache, ...resultToCache } = result;
contextManager.setCachedData(cacheKey, resultToCache);
} else if (!result.success) {
log.warn(
`Action failed for cache key ${cacheKey}. Result not cached. Error: ${result.error?.message}`
);
} else {
log.warn(
`Action for cache key ${cacheKey} succeeded but returned no data. Result not cached.`
);
}
// Return the fresh result, indicating it wasn't from cache
return {
...result,
fromCache: false
};
}
/**
@@ -194,79 +205,92 @@ export async function getCachedOrExecute({ cacheKey, actionFn, log }) {
* @returns {Promise<Object>} - Standardized response for FastMCP.
*/
export async function executeMCPToolAction({
actionFn,
args,
log,
actionName,
cacheKeyGenerator, // Note: We decided not to use this for listTasks for now
processResult = processMCPResponseData
actionFn,
args,
log,
actionName,
cacheKeyGenerator, // Note: We decided not to use this for listTasks for now
processResult = processMCPResponseData
}) {
try {
// Log the action start
log.info(`${actionName} with args: ${JSON.stringify(args)}`);
try {
// Log the action start
log.info(`${actionName} with args: ${JSON.stringify(args)}`);
// Normalize project root path - common to almost all tools
const projectRootRaw = args.projectRoot || process.cwd();
const projectRoot = path.isAbsolute(projectRootRaw)
? projectRootRaw
: path.resolve(process.cwd(), projectRootRaw);
// Normalize project root path - common to almost all tools
const projectRootRaw = args.projectRoot || process.cwd();
const projectRoot = path.isAbsolute(projectRootRaw)
? projectRootRaw
: path.resolve(process.cwd(), projectRootRaw);
log.info(`Using project root: ${projectRoot}`);
const executionArgs = { ...args, projectRoot };
log.info(`Using project root: ${projectRoot}`);
const executionArgs = { ...args, projectRoot };
let result;
const cacheKey = cacheKeyGenerator ? cacheKeyGenerator(executionArgs) : null;
let result;
const cacheKey = cacheKeyGenerator
? cacheKeyGenerator(executionArgs)
: null;
if (cacheKey) {
// Use caching utility
log.info(`Caching enabled for ${actionName} with key: ${cacheKey}`);
const cacheWrappedAction = async () => await actionFn(executionArgs, log);
result = await getCachedOrExecute({
cacheKey,
actionFn: cacheWrappedAction,
log
});
} else {
// Execute directly without caching
log.info(`Caching disabled for ${actionName}. Executing directly.`);
// We need to ensure the result from actionFn has a fromCache field
// Let's assume actionFn now consistently returns { success, data/error, fromCache }
// The current listTasksDirect does this if it calls getCachedOrExecute internally.
result = await actionFn(executionArgs, log);
// If the action function itself doesn't determine caching (like our original listTasksDirect refactor attempt),
// we'd set it here:
// result.fromCache = false;
}
if (cacheKey) {
// Use caching utility
log.info(`Caching enabled for ${actionName} with key: ${cacheKey}`);
const cacheWrappedAction = async () => await actionFn(executionArgs, log);
result = await getCachedOrExecute({
cacheKey,
actionFn: cacheWrappedAction,
log
});
} else {
// Execute directly without caching
log.info(`Caching disabled for ${actionName}. Executing directly.`);
// We need to ensure the result from actionFn has a fromCache field
// Let's assume actionFn now consistently returns { success, data/error, fromCache }
// The current listTasksDirect does this if it calls getCachedOrExecute internally.
result = await actionFn(executionArgs, log);
// If the action function itself doesn't determine caching (like our original listTasksDirect refactor attempt),
// we'd set it here:
// result.fromCache = false;
}
// Handle error case
if (!result.success) {
const errorMsg = result.error?.message || `Unknown error during ${actionName.toLowerCase()}`;
// Include fromCache in error logs too, might be useful
log.error(`Error during ${actionName.toLowerCase()}: ${errorMsg}. From cache: ${result.fromCache}`);
return createErrorResponse(errorMsg);
}
// Handle error case
if (!result.success) {
const errorMsg =
result.error?.message ||
`Unknown error during ${actionName.toLowerCase()}`;
// Include fromCache in error logs too, might be useful
log.error(
`Error during ${actionName.toLowerCase()}: ${errorMsg}. From cache: ${result.fromCache}`
);
return createErrorResponse(errorMsg);
}
// Log success
log.info(`Successfully completed ${actionName.toLowerCase()}. From cache: ${result.fromCache}`);
// Log success
log.info(
`Successfully completed ${actionName.toLowerCase()}. From cache: ${result.fromCache}`
);
// Process the result data if needed
const processedData = processResult ? processResult(result.data) : result.data;
// Process the result data if needed
const processedData = processResult
? processResult(result.data)
: result.data;
// Create a new object that includes both the processed data and the fromCache flag
const responsePayload = {
fromCache: result.fromCache, // Include the flag here
data: processedData // Embed the actual data under a 'data' key
};
// Pass this combined payload to createContentResponse
return createContentResponse(responsePayload);
// Create a new object that includes both the processed data and the fromCache flag
const responsePayload = {
fromCache: result.fromCache, // Include the flag here
data: processedData // Embed the actual data under a 'data' key
};
} catch (error) {
// Handle unexpected errors during the execution wrapper itself
log.error(`Unexpected error during ${actionName.toLowerCase()} execution wrapper: ${error.message}`);
console.error(error.stack); // Log stack for debugging wrapper errors
return createErrorResponse(`Internal server error during ${actionName.toLowerCase()}: ${error.message}`);
}
// Pass this combined payload to createContentResponse
return createContentResponse(responsePayload);
} catch (error) {
// Handle unexpected errors during the execution wrapper itself
log.error(
`Unexpected error during ${actionName.toLowerCase()} execution wrapper: ${error.message}`
);
console.error(error.stack); // Log stack for debugging wrapper errors
return createErrorResponse(
`Internal server error during ${actionName.toLowerCase()}: ${error.message}`
);
}
}
/**
@@ -276,56 +300,68 @@ export async function executeMCPToolAction({
* @param {string[]} fieldsToRemove - An array of field names to remove.
* @returns {Object|Array} - The processed data with specified fields removed.
*/
export function processMCPResponseData(taskOrData, fieldsToRemove = ['details', 'testStrategy']) {
if (!taskOrData) {
return taskOrData;
}
export function processMCPResponseData(
taskOrData,
fieldsToRemove = ['details', 'testStrategy']
) {
if (!taskOrData) {
return taskOrData;
}
// Helper function to process a single task object
const processSingleTask = (task) => {
if (typeof task !== 'object' || task === null) {
return task;
}
const processedTask = { ...task };
// Remove specified fields from the task
fieldsToRemove.forEach(field => {
delete processedTask[field];
});
// Helper function to process a single task object
const processSingleTask = (task) => {
if (typeof task !== 'object' || task === null) {
return task;
}
// Recursively process subtasks if they exist and are an array
if (processedTask.subtasks && Array.isArray(processedTask.subtasks)) {
// Use processArrayOfTasks to handle the subtasks array
processedTask.subtasks = processArrayOfTasks(processedTask.subtasks);
}
return processedTask;
};
// Helper function to process an array of tasks
const processArrayOfTasks = (tasks) => {
return tasks.map(processSingleTask);
};
const processedTask = { ...task };
// Check if the input is a data structure containing a 'tasks' array (like from listTasks)
if (typeof taskOrData === 'object' && taskOrData !== null && Array.isArray(taskOrData.tasks)) {
return {
...taskOrData, // Keep other potential fields like 'stats', 'filter'
tasks: processArrayOfTasks(taskOrData.tasks),
};
}
// Check if the input is likely a single task object (add more checks if needed)
else if (typeof taskOrData === 'object' && taskOrData !== null && 'id' in taskOrData && 'title' in taskOrData) {
return processSingleTask(taskOrData);
}
// Check if the input is an array of tasks directly (less common but possible)
else if (Array.isArray(taskOrData)) {
return processArrayOfTasks(taskOrData);
}
// If it doesn't match known task structures, return it as is
return taskOrData;
// Remove specified fields from the task
fieldsToRemove.forEach((field) => {
delete processedTask[field];
});
// Recursively process subtasks if they exist and are an array
if (processedTask.subtasks && Array.isArray(processedTask.subtasks)) {
// Use processArrayOfTasks to handle the subtasks array
processedTask.subtasks = processArrayOfTasks(processedTask.subtasks);
}
return processedTask;
};
// Helper function to process an array of tasks
const processArrayOfTasks = (tasks) => {
return tasks.map(processSingleTask);
};
// Check if the input is a data structure containing a 'tasks' array (like from listTasks)
if (
typeof taskOrData === 'object' &&
taskOrData !== null &&
Array.isArray(taskOrData.tasks)
) {
return {
...taskOrData, // Keep other potential fields like 'stats', 'filter'
tasks: processArrayOfTasks(taskOrData.tasks)
};
}
// Check if the input is likely a single task object (add more checks if needed)
else if (
typeof taskOrData === 'object' &&
taskOrData !== null &&
'id' in taskOrData &&
'title' in taskOrData
) {
return processSingleTask(taskOrData);
}
// Check if the input is an array of tasks directly (less common but possible)
else if (Array.isArray(taskOrData)) {
return processArrayOfTasks(taskOrData);
}
// If it doesn't match known task structures, return it as is
return taskOrData;
}
/**
@@ -334,19 +370,20 @@ export function processMCPResponseData(taskOrData, fieldsToRemove = ['details',
* @returns {Object} - Content response object in FastMCP format
*/
export function createContentResponse(content) {
// FastMCP requires text type, so we format objects as JSON strings
return {
content: [
{
type: "text",
text: typeof content === 'object' ?
// Format JSON nicely with indentation
JSON.stringify(content, null, 2) :
// Keep other content types as-is
String(content)
}
]
};
// FastMCP requires text type, so we format objects as JSON strings
return {
content: [
{
type: 'text',
text:
typeof content === 'object'
? // Format JSON nicely with indentation
JSON.stringify(content, null, 2)
: // Keep other content types as-is
String(content)
}
]
};
}
/**
@@ -355,13 +392,13 @@ export function createContentResponse(content) {
* @returns {Object} - Error content response object in FastMCP format
*/
export function createErrorResponse(errorMessage) {
return {
content: [
{
type: "text",
text: `Error: ${errorMessage}`
}
],
isError: true
};
return {
content: [
{
type: 'text',
text: `Error: ${errorMessage}`
}
],
isError: true
};
}
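To tie the utilities above together, a hedged sketch of wrapping an arbitrary action with `getCachedOrExecute` and converting the result into an MCP response with `handleApiResult`; the action body and cache key are invented for illustration.

```javascript
// Illustrative wiring of the helpers above; the action and cache key are made up.
import { getCachedOrExecute, handleApiResult } from './utils.js';

async function exampleToolExecute(args, log) {
  const cacheKey = `example:${args.projectRoot}`; // hypothetical key

  const result = await getCachedOrExecute({
    cacheKey,
    actionFn: async () => {
      // Successful results that carry a data field get cached; failures do not.
      return { success: true, data: { answer: 42 } };
    },
    log
  });

  // Produces the { content: [...] } payload FastMCP expects,
  // with { fromCache, data } embedded as JSON text.
  return handleApiResult(result, log, 'Error running example');
}
```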

View File

@@ -1,6 +1,6 @@
{
"key": "value",
"nested": {
"prop": true
}
}
"key": "value",
"nested": {
"prop": true
}
}

15185
package-lock.json generated

File diff suppressed because it is too large

View File

@@ -1,92 +1,95 @@
{
"name": "task-master-ai",
"version": "0.10.1",
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
"main": "index.js",
"type": "module",
"bin": {
"task-master": "bin/task-master.js",
"task-master-init": "bin/task-master-init.js",
"task-master-mcp-server": "mcp-server/server.js"
},
"scripts": {
"test": "node --experimental-vm-modules node_modules/.bin/jest",
"test:fails": "node --experimental-vm-modules node_modules/.bin/jest --onlyFailures",
"test:watch": "node --experimental-vm-modules node_modules/.bin/jest --watch",
"test:coverage": "node --experimental-vm-modules node_modules/.bin/jest --coverage",
"prepare-package": "node scripts/prepare-package.js",
"prepublishOnly": "npm run prepare-package",
"prepare": "chmod +x bin/task-master.js bin/task-master-init.js",
"changeset": "changeset",
"release": "changeset publish",
"inspector": "CLIENT_PORT=8888 SERVER_PORT=9000 npx @modelcontextprotocol/inspector node mcp-server/server.js"
},
"keywords": [
"claude",
"task",
"management",
"ai",
"development",
"cursor",
"anthropic",
"llm",
"mcp",
"context"
],
"author": "Eyal Toledano",
"license": "MIT WITH Commons-Clause",
"dependencies": {
"@anthropic-ai/sdk": "^0.39.0",
"boxen": "^8.0.1",
"chalk": "^4.1.2",
"cli-table3": "^0.6.5",
"commander": "^11.1.0",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
"express": "^4.21.2",
"fastmcp": "^1.20.5",
"figlet": "^1.8.0",
"fuse.js": "^7.0.0",
"gradient-string": "^3.0.0",
"helmet": "^8.1.0",
"jsonwebtoken": "^9.0.2",
"lru-cache": "^10.2.0",
"openai": "^4.89.0",
"ora": "^8.2.0"
},
"engines": {
"node": ">=14.0.0"
},
"repository": {
"type": "git",
"url": "git+https://github.com/eyaltoledano/claude-task-master.git"
},
"homepage": "https://github.com/eyaltoledano/claude-task-master#readme",
"bugs": {
"url": "https://github.com/eyaltoledano/claude-task-master/issues"
},
"files": [
"scripts/init.js",
"scripts/dev.js",
"scripts/modules/**",
"assets/**",
".cursor/**",
"README-task-master.md",
"index.js",
"bin/**",
"mcp-server/**"
],
"overrides": {
"node-fetch": "^3.3.2",
"whatwg-url": "^11.0.0"
},
"devDependencies": {
"@changesets/changelog-github": "^0.5.1",
"@changesets/cli": "^2.28.1",
"@types/jest": "^29.5.14",
"jest": "^29.7.0",
"jest-environment-node": "^29.7.0",
"mock-fs": "^5.5.0",
"supertest": "^7.1.0"
}
"name": "task-master-ai",
"version": "0.10.1",
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
"main": "index.js",
"type": "module",
"bin": {
"task-master": "bin/task-master.js",
"task-master-init": "bin/task-master-init.js",
"task-master-mcp-server": "mcp-server/server.js"
},
"scripts": {
"test": "node --experimental-vm-modules node_modules/.bin/jest",
"test:fails": "node --experimental-vm-modules node_modules/.bin/jest --onlyFailures",
"test:watch": "node --experimental-vm-modules node_modules/.bin/jest --watch",
"test:coverage": "node --experimental-vm-modules node_modules/.bin/jest --coverage",
"prepare-package": "node scripts/prepare-package.js",
"prepublishOnly": "npm run prepare-package",
"prepare": "chmod +x bin/task-master.js bin/task-master-init.js",
"changeset": "changeset",
"release": "changeset publish",
"inspector": "CLIENT_PORT=8888 SERVER_PORT=9000 npx @modelcontextprotocol/inspector node mcp-server/server.js",
"format-check": "prettier --check .",
"format": "prettier --write ."
},
"keywords": [
"claude",
"task",
"management",
"ai",
"development",
"cursor",
"anthropic",
"llm",
"mcp",
"context"
],
"author": "Eyal Toledano",
"license": "MIT WITH Commons-Clause",
"dependencies": {
"@anthropic-ai/sdk": "^0.39.0",
"boxen": "^8.0.1",
"chalk": "^4.1.2",
"cli-table3": "^0.6.5",
"commander": "^11.1.0",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
"express": "^4.21.2",
"fastmcp": "^1.20.5",
"figlet": "^1.8.0",
"fuse.js": "^7.0.0",
"gradient-string": "^3.0.0",
"helmet": "^8.1.0",
"jsonwebtoken": "^9.0.2",
"lru-cache": "^10.2.0",
"openai": "^4.89.0",
"ora": "^8.2.0"
},
"engines": {
"node": ">=14.0.0"
},
"repository": {
"type": "git",
"url": "git+https://github.com/eyaltoledano/claude-task-master.git"
},
"homepage": "https://github.com/eyaltoledano/claude-task-master#readme",
"bugs": {
"url": "https://github.com/eyaltoledano/claude-task-master/issues"
},
"files": [
"scripts/init.js",
"scripts/dev.js",
"scripts/modules/**",
"assets/**",
".cursor/**",
"README-task-master.md",
"index.js",
"bin/**",
"mcp-server/**"
],
"overrides": {
"node-fetch": "^3.3.2",
"whatwg-url": "^11.0.0"
},
"devDependencies": {
"@changesets/changelog-github": "^0.5.1",
"@changesets/cli": "^2.28.1",
"@types/jest": "^29.5.14",
"jest": "^29.7.0",
"jest-environment-node": "^29.7.0",
"mock-fs": "^5.5.0",
"prettier": "3.5.3",
"supertest": "^7.1.0"
}
}

View File

@@ -21,9 +21,11 @@ In an AI-driven development process—particularly with tools like [Cursor](http
The script can be configured through environment variables in a `.env` file at the root of the project:
### Required Configuration
- `ANTHROPIC_API_KEY`: Your Anthropic API key for Claude
### Optional Configuration
- `MODEL`: Specify which Claude model to use (default: "claude-3-7-sonnet-20250219")
- `MAX_TOKENS`: Maximum tokens for model responses (default: 4000)
- `TEMPERATURE`: Temperature for model responses (default: 0.7)
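The variables listed above map directly onto the `CONFIG` object in `scripts/modules/utils.js`, which appears further down in this diff. A minimal sketch of how they could be loaded and defaulted, assuming `dotenv` is used to read the `.env` file (it is listed in the package's dependencies):

```js
// Sketch only: mirrors the CONFIG defaults documented above; not the exact utils.js code.
import dotenv from 'dotenv';

dotenv.config();

if (!process.env.ANTHROPIC_API_KEY) {
  throw new Error('ANTHROPIC_API_KEY is required'); // the one required variable
}

const CONFIG = {
  model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
  maxTokens: parseInt(process.env.MAX_TOKENS || '4000'),
  temperature: parseFloat(process.env.TEMPERATURE || '0.7')
};
```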
@@ -38,9 +40,10 @@ The script can be configured through environment variables in a `.env` file at t
## How It Works
1. **`tasks.json`**:
- A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
- The `meta` field can store additional info like the project's name, version, or reference to the PRD.
1. **`tasks.json`**:
- A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
- The `meta` field can store additional info like the project's name, version, or reference to the PRD.
- Tasks can have `subtasks` for more detailed implementation steps.
- Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.
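For orientation, a hypothetical `tasks.json` shape that matches the fields named above; the concrete values and the exact `meta` keys are illustrative assumptions, not taken from a real project:

```js
// Illustrative structure only; field values are made up.
const tasksJson = {
  meta: { projectName: 'Your Project Name', version: '1.0.0', prd: 'scripts/prd.txt' },
  tasks: [
    {
      id: 1,
      title: 'Set up project skeleton',
      description: 'Initialize the repository and tooling',
      status: 'done',
      dependencies: [],
      priority: 'high',
      subtasks: [{ id: 1, title: 'Create package.json', status: 'done', dependencies: [] }]
    },
    {
      id: 2,
      title: 'Implement task data structure',
      description: 'Design the tasks.json schema',
      status: 'pending',
      dependencies: [1] // shown with ✅ / ⏱️ indicators in list output
    }
  ]
};
```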
@@ -102,6 +105,7 @@ node scripts/dev.js update --file=custom-tasks.json --from=5 --prompt="Change da
```
Notes:
- The `--prompt` parameter is required and should explain the changes or new context
- Only tasks that aren't marked as 'done' will be updated
- Tasks with ID >= the specified --from value will be updated
@@ -120,6 +124,7 @@ node scripts/dev.js update-task --id=4 --prompt="Use JWT for authentication" --r
```
This command:
- Updates only the specified task rather than a range of tasks
- Provides detailed validation with helpful error messages
- Checks for required API keys when using research mode
@@ -146,6 +151,7 @@ node scripts/dev.js set-status --id=1,2,3 --status=done
```
Notes:
- When marking a parent task as "done", all of its subtasks will automatically be marked as "done" as well
- Common status values are 'done', 'pending', and 'deferred', but any string is accepted
- You can specify multiple task IDs by separating them with commas
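A short sketch of the cascade described in the first note; this illustrates the documented behavior rather than the command's actual implementation:

```js
// Marking a parent task done also marks its subtasks done (illustration only).
function markDone(task) {
  task.status = 'done';
  for (const subtask of task.subtasks || []) {
    subtask.status = 'done';
  }
}

// Comma-separated IDs such as --id=1,2,3 simply repeat the same operation per task.
```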
@@ -195,6 +201,7 @@ node scripts/dev.js clear-subtasks --all
```
Notes:
- After clearing subtasks, task files are automatically regenerated
- This is useful when you want to regenerate subtasks with a different approach
- Can be combined with the `expand` command to immediately generate new subtasks
@@ -210,6 +217,7 @@ The script integrates with two AI services:
The Perplexity integration uses the OpenAI client to connect to Perplexity's API, which provides enhanced research capabilities for generating more informed subtasks. If the Perplexity API is unavailable or encounters an error, the script will automatically fall back to using Anthropic's Claude.
To use the Perplexity integration:
1. Obtain a Perplexity API key
2. Add `PERPLEXITY_API_KEY` to your `.env` file
3. Optionally specify `PERPLEXITY_MODEL` in your `.env` file (default: "sonar-medium-online")
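A sketch of the fallback pattern described above, using the OpenAI client pointed at Perplexity's endpoint and the Anthropic SDK as the fallback. The real `ai-services.js` diff is suppressed in this compare, so the model names, prompt handling, and error handling here are assumptions:

```js
import OpenAI from 'openai';
import Anthropic from '@anthropic-ai/sdk';

const perplexity = new OpenAI({
  apiKey: process.env.PERPLEXITY_API_KEY,
  baseURL: 'https://api.perplexity.ai'
});
const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

async function research(prompt) {
  try {
    const res = await perplexity.chat.completions.create({
      model: process.env.PERPLEXITY_MODEL || 'sonar-medium-online',
      messages: [{ role: 'user', content: prompt }]
    });
    return res.choices[0].message.content;
  } catch (err) {
    // Fall back to Claude if Perplexity is unavailable or returns an error
    const msg = await anthropic.messages.create({
      model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
      max_tokens: parseInt(process.env.MAX_TOKENS || '4000'),
      messages: [{ role: 'user', content: prompt }]
    });
    return msg.content[0].text;
  }
}
```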
@@ -218,6 +226,7 @@ To use the Perplexity integration:
## Logging
The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:
- `debug`: Detailed information, typically useful for troubleshooting
- `info`: Confirmation that things are working as expected (default)
- `warn`: Warning messages that don't prevent execution
@@ -240,17 +249,20 @@ node scripts/dev.js remove-dependency --id=<id> --depends-on=<id>
These commands:
1. **Allow precise dependency management**:
- Add dependencies between tasks with automatic validation
- Remove dependencies when they're no longer needed
- Update task files automatically after changes
2. **Include validation checks**:
- Prevent circular dependencies (a task depending on itself)
- Prevent duplicate dependencies
- Verify that both tasks exist before adding/removing dependencies
- Check if dependencies exist before attempting to remove them
3. **Provide clear feedback**:
- Success messages confirm when dependencies are added/removed
- Error messages explain why operations failed (if applicable)
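The validation checks listed above reduce to a few guards before a dependency edge is added. A simplified sketch, reusing `taskExists` and `findTaskById` from `scripts/modules/utils.js` (visible later in this diff); the import path is assumed and the real command also handles subtask IDs like "1.2" and regenerates task files:

```js
import { taskExists, findTaskById } from './modules/utils.js';

// Simplified illustration of the documented checks, not the actual command code.
function addDependency(tasks, taskId, dependsOnId) {
  if (taskId === dependsOnId) {
    throw new Error('A task cannot depend on itself');
  }
  if (!taskExists(tasks, taskId) || !taskExists(tasks, dependsOnId)) {
    throw new Error('Both tasks must exist before adding a dependency');
  }
  const task = findTaskById(tasks, taskId);
  task.dependencies = task.dependencies || [];
  if (task.dependencies.includes(dependsOnId)) {
    throw new Error('Dependency already exists');
  }
  task.dependencies.push(dependsOnId);
}
```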
@@ -275,6 +287,7 @@ node scripts/dev.js validate-dependencies --file=custom-tasks.json
```
This command:
- Scans all tasks and subtasks for non-existent dependencies
- Identifies potential self-dependencies (tasks referencing themselves)
- Reports all found issues without modifying files
@@ -296,6 +309,7 @@ node scripts/dev.js fix-dependencies --file=custom-tasks.json
```
This command:
1. **Validates all dependencies** across tasks and subtasks
2. **Automatically removes**:
- References to non-existent tasks and subtasks
@@ -333,6 +347,7 @@ node scripts/dev.js analyze-complexity --research
```
Notes:
- The command uses Claude to analyze each task's complexity (or Perplexity with --research flag)
- Tasks are scored on a scale of 1-10
- Each task receives a recommended number of subtasks based on DEFAULT_SUBTASKS configuration
@@ -357,33 +372,35 @@ node scripts/dev.js expand --id=8 --num=5 --prompt="Custom prompt"
```
When a complexity report exists:
- The `expand` command will use the recommended subtask count from the report (unless overridden)
- It will use the tailored expansion prompt from the report (unless a custom prompt is provided)
- When using `--all`, tasks are sorted by complexity score (highest first)
- The `--research` flag is preserved from the complexity analysis to expansion
The output report structure is:
```json
{
"meta": {
"generatedAt": "2023-06-15T12:34:56.789Z",
"tasksAnalyzed": 20,
"thresholdScore": 5,
"projectName": "Your Project Name",
"usedResearch": true
},
"complexityAnalysis": [
{
"taskId": 8,
"taskTitle": "Develop Implementation Drift Handling",
"complexityScore": 9.5,
"recommendedSubtasks": 6,
"expansionPrompt": "Create subtasks that handle detecting...",
"reasoning": "This task requires sophisticated logic...",
"expansionCommand": "node scripts/dev.js expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
},
// More tasks sorted by complexity score (highest first)
]
"meta": {
"generatedAt": "2023-06-15T12:34:56.789Z",
"tasksAnalyzed": 20,
"thresholdScore": 5,
"projectName": "Your Project Name",
"usedResearch": true
},
"complexityAnalysis": [
{
"taskId": 8,
"taskTitle": "Develop Implementation Drift Handling",
"complexityScore": 9.5,
"recommendedSubtasks": 6,
"expansionPrompt": "Create subtasks that handle detecting...",
"reasoning": "This task requires sophisticated logic...",
"expansionCommand": "node scripts/dev.js expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
}
// More tasks sorted by complexity score (highest first)
]
}
```
@@ -457,16 +474,19 @@ This command is particularly useful when you need to examine a specific task in
The script now includes improved error handling throughout all commands:
1. **Detailed Validation**:
- Required parameters (like task IDs and prompts) are validated early
- File existence is checked with customized errors for common scenarios
- Parameter type conversion is handled with clear error messages
2. **Contextual Error Messages**:
- Task not found errors include suggestions to run the list command
- API key errors include reminders to check environment variables
- Invalid ID format errors show the expected format
3. **Command-Specific Help Displays**:
- When validation fails, detailed help for the specific command is shown
- Help displays include usage examples and parameter descriptions
- Formatted in clear, color-coded boxes with examples
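As a small illustration of the "contextual error message" idea, a lookup helper might suggest the recovery step along with the failure; the wording below is hypothetical, not the CLI's actual output:

```js
import chalk from 'chalk';
import { findTaskById } from './modules/utils.js';

// Illustration only: a not-found error that points the user at the list command.
function requireTask(tasks, taskId) {
  const task = findTaskById(tasks, taskId);
  if (!task) {
    console.error(chalk.red(`Task ${taskId} not found.`));
    console.error(chalk.yellow('Run "node scripts/dev.js list" to see available task IDs.'));
    process.exit(1);
  }
  return task;
}
```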
@@ -481,11 +501,13 @@ The script now includes improved error handling throughout all commands:
The script now automatically checks for updates without slowing down execution:
1. **Background Version Checking**:
- Non-blocking version checks run in the background while commands execute
- Actual command execution isn't delayed by version checking
- Update notifications appear after command completion
2. **Update Notifications**:
- When a newer version is available, a notification is displayed
- Notifications include current version, latest version, and update command
- Formatted in an attention-grabbing box with clear instructions
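One way such a non-blocking check could work is to query the public npm registry in the background and only report after the command finishes. This is a sketch under that assumption, not the project's actual implementation; the version and update command shown are taken from the package.json in this diff but the notification text is illustrative:

```js
import https from 'https';

function checkLatestVersion(pkg, currentVersion) {
  return new Promise((resolve) => {
    https
      .get(`https://registry.npmjs.org/${pkg}/latest`, (res) => {
        let body = '';
        res.on('data', (chunk) => (body += chunk));
        res.on('end', () => {
          try {
            resolve(JSON.parse(body).version);
          } catch {
            resolve(currentVersion); // fail quietly; never block the command
          }
        });
      })
      .on('error', () => resolve(currentVersion));
  });
}

// Kick off the check without awaiting it, run the command, then report afterwards.
const latestPromise = checkLatestVersion('task-master-ai', '0.10.1');
// ... command executes here ...
latestPromise.then((latest) => {
  if (latest !== '0.10.1') {
    console.log(`Update available: 0.10.1 -> ${latest}. Run: npm i -g task-master-ai`);
  }
});
```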
@@ -516,6 +538,7 @@ node scripts/dev.js add-subtask --parent=5 --title="Login API route" --skip-gene
```
Key features:
- Create new subtasks with detailed properties or convert existing tasks
- Define dependencies between subtasks
- Set custom status for new subtasks
@@ -538,7 +561,8 @@ node scripts/dev.js remove-subtask --id=5.2 --skip-generate
```
Key features:
- Remove subtasks individually or in batches
- Optionally convert subtasks to standalone tasks
- Control whether task files are regenerated
- Provides detailed success messages and next steps
- Provides detailed success messages and next steps

View File

@@ -3,17 +3,17 @@
/**
* dev.js
* Task Master CLI - AI-driven development task management
*
*
* This is the refactored entry point that uses the modular architecture.
* It imports functionality from the modules directory and provides a CLI.
*/
// Add at the very beginning of the file
if (process.env.DEBUG === '1') {
console.error('DEBUG - dev.js received args:', process.argv.slice(2));
console.error('DEBUG - dev.js received args:', process.argv.slice(2));
}
import { runCLI } from './modules/commands.js';
// Run the CLI with the process arguments
runCLI(process.argv);
runCLI(process.argv);

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -8,4 +8,4 @@ export * from './utils.js';
export * from './ui.js';
export * from './ai-services.js';
export * from './task-manager.js';
export * from './commands.js';
export * from './commands.js';

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -9,23 +9,23 @@ import chalk from 'chalk';
// Configuration and constants
const CONFIG = {
model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
maxTokens: parseInt(process.env.MAX_TOKENS || '4000'),
temperature: parseFloat(process.env.TEMPERATURE || '0.7'),
debug: process.env.DEBUG === "true",
logLevel: process.env.LOG_LEVEL || "info",
defaultSubtasks: parseInt(process.env.DEFAULT_SUBTASKS || "3"),
defaultPriority: process.env.DEFAULT_PRIORITY || "medium",
projectName: process.env.PROJECT_NAME || "Task Master",
projectVersion: "1.5.0" // Hardcoded version - ALWAYS use this value, ignore environment variable
model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
maxTokens: parseInt(process.env.MAX_TOKENS || '4000'),
temperature: parseFloat(process.env.TEMPERATURE || '0.7'),
debug: process.env.DEBUG === 'true',
logLevel: process.env.LOG_LEVEL || 'info',
defaultSubtasks: parseInt(process.env.DEFAULT_SUBTASKS || '3'),
defaultPriority: process.env.DEFAULT_PRIORITY || 'medium',
projectName: process.env.PROJECT_NAME || 'Task Master',
projectVersion: '1.5.0' // Hardcoded version - ALWAYS use this value, ignore environment variable
};
// Set up logging based on log level
const LOG_LEVELS = {
debug: 0,
info: 1,
warn: 2,
error: 3
debug: 0,
info: 1,
warn: 2,
error: 3
};
/**
@@ -34,18 +34,18 @@ const LOG_LEVELS = {
* @param {...any} args - Arguments to log
*/
function log(level, ...args) {
const icons = {
debug: chalk.gray('🔍'),
info: chalk.blue(''),
warn: chalk.yellow('⚠️'),
error: chalk.red('❌'),
success: chalk.green('✅')
};
if (LOG_LEVELS[level] >= LOG_LEVELS[CONFIG.logLevel]) {
const icon = icons[level] || '';
console.log(`${icon} ${args.join(' ')}`);
}
const icons = {
debug: chalk.gray('🔍'),
info: chalk.blue(''),
warn: chalk.yellow('⚠️'),
error: chalk.red('❌'),
success: chalk.green('✅')
};
if (LOG_LEVELS[level] >= LOG_LEVELS[CONFIG.logLevel]) {
const icon = icons[level] || '';
console.log(`${icon} ${args.join(' ')}`);
}
}
/**
@@ -54,16 +54,16 @@ function log(level, ...args) {
* @returns {Object} Parsed JSON data
*/
function readJSON(filepath) {
try {
const rawData = fs.readFileSync(filepath, 'utf8');
return JSON.parse(rawData);
} catch (error) {
log('error', `Error reading JSON file ${filepath}:`, error.message);
if (CONFIG.debug) {
console.error(error);
}
return null;
}
try {
const rawData = fs.readFileSync(filepath, 'utf8');
return JSON.parse(rawData);
} catch (error) {
log('error', `Error reading JSON file ${filepath}:`, error.message);
if (CONFIG.debug) {
console.error(error);
}
return null;
}
}
/**
@@ -72,14 +72,14 @@ function readJSON(filepath) {
* @param {Object} data - Data to write
*/
function writeJSON(filepath, data) {
try {
fs.writeFileSync(filepath, JSON.stringify(data, null, 2));
} catch (error) {
log('error', `Error writing JSON file ${filepath}:`, error.message);
if (CONFIG.debug) {
console.error(error);
}
}
try {
fs.writeFileSync(filepath, JSON.stringify(data, null, 2));
} catch (error) {
log('error', `Error writing JSON file ${filepath}:`, error.message);
if (CONFIG.debug) {
console.error(error);
}
}
}
/**
@@ -88,8 +88,8 @@ function writeJSON(filepath, data) {
* @returns {string} Sanitized prompt
*/
function sanitizePrompt(prompt) {
// Replace double quotes with escaped double quotes
return prompt.replace(/"/g, '\\"');
// Replace double quotes with escaped double quotes
return prompt.replace(/"/g, '\\"');
}
/**
@@ -98,18 +98,20 @@ function sanitizePrompt(prompt) {
* @returns {Object|null} The parsed complexity report or null if not found
*/
function readComplexityReport(customPath = null) {
try {
const reportPath = customPath || path.join(process.cwd(), 'scripts', 'task-complexity-report.json');
if (!fs.existsSync(reportPath)) {
return null;
}
const reportData = fs.readFileSync(reportPath, 'utf8');
return JSON.parse(reportData);
} catch (error) {
log('warn', `Could not read complexity report: ${error.message}`);
return null;
}
try {
const reportPath =
customPath ||
path.join(process.cwd(), 'scripts', 'task-complexity-report.json');
if (!fs.existsSync(reportPath)) {
return null;
}
const reportData = fs.readFileSync(reportPath, 'utf8');
return JSON.parse(reportData);
} catch (error) {
log('warn', `Could not read complexity report: ${error.message}`);
return null;
}
}
/**
@@ -119,11 +121,15 @@ function readComplexityReport(customPath = null) {
* @returns {Object|null} The task analysis or null if not found
*/
function findTaskInComplexityReport(report, taskId) {
if (!report || !report.complexityAnalysis || !Array.isArray(report.complexityAnalysis)) {
return null;
}
return report.complexityAnalysis.find(task => task.taskId === taskId);
if (
!report ||
!report.complexityAnalysis ||
!Array.isArray(report.complexityAnalysis)
) {
return null;
}
return report.complexityAnalysis.find((task) => task.taskId === taskId);
}
/**
@@ -133,24 +139,26 @@ function findTaskInComplexityReport(report, taskId) {
* @returns {boolean} True if the task exists, false otherwise
*/
function taskExists(tasks, taskId) {
if (!taskId || !tasks || !Array.isArray(tasks)) {
return false;
}
// Handle both regular task IDs and subtask IDs (e.g., "1.2")
if (typeof taskId === 'string' && taskId.includes('.')) {
const [parentId, subtaskId] = taskId.split('.').map(id => parseInt(id, 10));
const parentTask = tasks.find(t => t.id === parentId);
if (!parentTask || !parentTask.subtasks) {
return false;
}
return parentTask.subtasks.some(st => st.id === subtaskId);
}
const id = parseInt(taskId, 10);
return tasks.some(t => t.id === id);
if (!taskId || !tasks || !Array.isArray(tasks)) {
return false;
}
// Handle both regular task IDs and subtask IDs (e.g., "1.2")
if (typeof taskId === 'string' && taskId.includes('.')) {
const [parentId, subtaskId] = taskId
.split('.')
.map((id) => parseInt(id, 10));
const parentTask = tasks.find((t) => t.id === parentId);
if (!parentTask || !parentTask.subtasks) {
return false;
}
return parentTask.subtasks.some((st) => st.id === subtaskId);
}
const id = parseInt(taskId, 10);
return tasks.some((t) => t.id === id);
}
/**
@@ -159,15 +167,15 @@ function taskExists(tasks, taskId) {
* @returns {string} The formatted task ID
*/
function formatTaskId(id) {
if (typeof id === 'string' && id.includes('.')) {
return id; // Already formatted as a string with a dot (e.g., "1.2")
}
if (typeof id === 'number') {
return id.toString();
}
return id;
if (typeof id === 'string' && id.includes('.')) {
return id; // Already formatted as a string with a dot (e.g., "1.2")
}
if (typeof id === 'number') {
return id.toString();
}
return id;
}
/**
@@ -177,35 +185,37 @@ function formatTaskId(id) {
* @returns {Object|null} The task object or null if not found
*/
function findTaskById(tasks, taskId) {
if (!taskId || !tasks || !Array.isArray(tasks)) {
return null;
}
// Check if it's a subtask ID (e.g., "1.2")
if (typeof taskId === 'string' && taskId.includes('.')) {
const [parentId, subtaskId] = taskId.split('.').map(id => parseInt(id, 10));
const parentTask = tasks.find(t => t.id === parentId);
if (!parentTask || !parentTask.subtasks) {
return null;
}
const subtask = parentTask.subtasks.find(st => st.id === subtaskId);
if (subtask) {
// Add reference to parent task for context
subtask.parentTask = {
id: parentTask.id,
title: parentTask.title,
status: parentTask.status
};
subtask.isSubtask = true;
}
return subtask || null;
}
const id = parseInt(taskId, 10);
return tasks.find(t => t.id === id) || null;
if (!taskId || !tasks || !Array.isArray(tasks)) {
return null;
}
// Check if it's a subtask ID (e.g., "1.2")
if (typeof taskId === 'string' && taskId.includes('.')) {
const [parentId, subtaskId] = taskId
.split('.')
.map((id) => parseInt(id, 10));
const parentTask = tasks.find((t) => t.id === parentId);
if (!parentTask || !parentTask.subtasks) {
return null;
}
const subtask = parentTask.subtasks.find((st) => st.id === subtaskId);
if (subtask) {
// Add reference to parent task for context
subtask.parentTask = {
id: parentTask.id,
title: parentTask.title,
status: parentTask.status
};
subtask.isSubtask = true;
}
return subtask || null;
}
const id = parseInt(taskId, 10);
return tasks.find((t) => t.id === id) || null;
}
/**
@@ -215,11 +225,11 @@ function findTaskById(tasks, taskId) {
* @returns {string} The truncated text
*/
function truncate(text, maxLength) {
if (!text || text.length <= maxLength) {
return text;
}
return text.slice(0, maxLength - 3) + '...';
if (!text || text.length <= maxLength) {
return text;
}
return text.slice(0, maxLength - 3) + '...';
}
/**
@@ -230,39 +240,47 @@ function truncate(text, maxLength) {
* @param {Set} recursionStack - Set of nodes in current recursion stack
* @returns {Array} - List of dependency edges that need to be removed to break cycles
*/
function findCycles(subtaskId, dependencyMap, visited = new Set(), recursionStack = new Set(), path = []) {
// Mark the current node as visited and part of recursion stack
visited.add(subtaskId);
recursionStack.add(subtaskId);
path.push(subtaskId);
const cyclesToBreak = [];
// Get all dependencies of the current subtask
const dependencies = dependencyMap.get(subtaskId) || [];
// For each dependency
for (const depId of dependencies) {
// If not visited, recursively check for cycles
if (!visited.has(depId)) {
const cycles = findCycles(depId, dependencyMap, visited, recursionStack, [...path]);
cyclesToBreak.push(...cycles);
}
// If the dependency is in the recursion stack, we found a cycle
else if (recursionStack.has(depId)) {
// Find the position of the dependency in the path
const cycleStartIndex = path.indexOf(depId);
// The last edge in the cycle is what we want to remove
const cycleEdges = path.slice(cycleStartIndex);
// We'll remove the last edge in the cycle (the one that points back)
cyclesToBreak.push(depId);
}
}
// Remove the node from recursion stack before returning
recursionStack.delete(subtaskId);
return cyclesToBreak;
function findCycles(
subtaskId,
dependencyMap,
visited = new Set(),
recursionStack = new Set(),
path = []
) {
// Mark the current node as visited and part of recursion stack
visited.add(subtaskId);
recursionStack.add(subtaskId);
path.push(subtaskId);
const cyclesToBreak = [];
// Get all dependencies of the current subtask
const dependencies = dependencyMap.get(subtaskId) || [];
// For each dependency
for (const depId of dependencies) {
// If not visited, recursively check for cycles
if (!visited.has(depId)) {
const cycles = findCycles(depId, dependencyMap, visited, recursionStack, [
...path
]);
cyclesToBreak.push(...cycles);
}
// If the dependency is in the recursion stack, we found a cycle
else if (recursionStack.has(depId)) {
// Find the position of the dependency in the path
const cycleStartIndex = path.indexOf(depId);
// The last edge in the cycle is what we want to remove
const cycleEdges = path.slice(cycleStartIndex);
// We'll remove the last edge in the cycle (the one that points back)
cyclesToBreak.push(depId);
}
}
// Remove the node from recursion stack before returning
recursionStack.delete(subtaskId);
return cyclesToBreak;
}
/**
@@ -271,23 +289,23 @@ function findCycles(subtaskId, dependencyMap, visited = new Set(), recursionStac
* @returns {string} The kebab-case version of the string
*/
const toKebabCase = (str) => {
// Special handling for common acronyms
const withReplacedAcronyms = str
.replace(/ID/g, 'Id')
.replace(/API/g, 'Api')
.replace(/UI/g, 'Ui')
.replace(/URL/g, 'Url')
.replace(/URI/g, 'Uri')
.replace(/JSON/g, 'Json')
.replace(/XML/g, 'Xml')
.replace(/HTML/g, 'Html')
.replace(/CSS/g, 'Css');
// Insert hyphens before capital letters and convert to lowercase
return withReplacedAcronyms
.replace(/([A-Z])/g, '-$1')
.toLowerCase()
.replace(/^-/, ''); // Remove leading hyphen if present
// Special handling for common acronyms
const withReplacedAcronyms = str
.replace(/ID/g, 'Id')
.replace(/API/g, 'Api')
.replace(/UI/g, 'Ui')
.replace(/URL/g, 'Url')
.replace(/URI/g, 'Uri')
.replace(/JSON/g, 'Json')
.replace(/XML/g, 'Xml')
.replace(/HTML/g, 'Html')
.replace(/CSS/g, 'Css');
// Insert hyphens before capital letters and convert to lowercase
return withReplacedAcronyms
.replace(/([A-Z])/g, '-$1')
.toLowerCase()
.replace(/^-/, ''); // Remove leading hyphen if present
};
/**
@@ -296,46 +314,46 @@ const toKebabCase = (str) => {
* @returns {Array<{original: string, kebabCase: string}>} - List of flags that should be converted
*/
function detectCamelCaseFlags(args) {
const camelCaseFlags = [];
for (const arg of args) {
if (arg.startsWith('--')) {
const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =
// Skip single-word flags - they can't be camelCase
if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
continue;
}
// Check for camelCase pattern (lowercase followed by uppercase)
if (/[a-z][A-Z]/.test(flagName)) {
const kebabVersion = toKebabCase(flagName);
if (kebabVersion !== flagName) {
camelCaseFlags.push({
original: flagName,
kebabCase: kebabVersion
});
}
}
}
}
return camelCaseFlags;
const camelCaseFlags = [];
for (const arg of args) {
if (arg.startsWith('--')) {
const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =
// Skip single-word flags - they can't be camelCase
if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
continue;
}
// Check for camelCase pattern (lowercase followed by uppercase)
if (/[a-z][A-Z]/.test(flagName)) {
const kebabVersion = toKebabCase(flagName);
if (kebabVersion !== flagName) {
camelCaseFlags.push({
original: flagName,
kebabCase: kebabVersion
});
}
}
}
}
return camelCaseFlags;
}
// Export all utility functions and configuration
export {
CONFIG,
LOG_LEVELS,
log,
readJSON,
writeJSON,
sanitizePrompt,
readComplexityReport,
findTaskInComplexityReport,
taskExists,
formatTaskId,
findTaskById,
truncate,
findCycles,
toKebabCase,
detectCamelCaseFlags
};
CONFIG,
LOG_LEVELS,
log,
readJSON,
writeJSON,
sanitizePrompt,
readComplexityReport,
findTaskInComplexityReport,
taskExists,
formatTaskId,
findTaskById,
truncate,
findCycles,
toKebabCase,
detectCamelCaseFlags
};

View File

@@ -3,7 +3,7 @@
/**
* This script prepares the package for publication to NPM.
* It ensures all necessary files are included and properly configured.
*
*
* Additional options:
* --patch: Increment patch version (default)
* --minor: Increment minor version
@@ -22,175 +22,189 @@ const __dirname = dirname(__filename);
// Define colors for console output
const COLORS = {
reset: '\x1b[0m',
bright: '\x1b[1m',
dim: '\x1b[2m',
red: '\x1b[31m',
green: '\x1b[32m',
yellow: '\x1b[33m',
blue: '\x1b[34m',
magenta: '\x1b[35m',
cyan: '\x1b[36m'
reset: '\x1b[0m',
bright: '\x1b[1m',
dim: '\x1b[2m',
red: '\x1b[31m',
green: '\x1b[32m',
yellow: '\x1b[33m',
blue: '\x1b[34m',
magenta: '\x1b[35m',
cyan: '\x1b[36m'
};
// Parse command line arguments
const args = process.argv.slice(2);
const versionBump = args.includes('--major') ? 'major' :
args.includes('--minor') ? 'minor' :
'patch';
const versionBump = args.includes('--major')
? 'major'
: args.includes('--minor')
? 'minor'
: 'patch';
// Check for explicit version
const versionArg = args.find(arg => arg.startsWith('--version='));
const versionArg = args.find((arg) => arg.startsWith('--version='));
const explicitVersion = versionArg ? versionArg.split('=')[1] : null;
// Log function with color support
function log(level, ...args) {
const prefix = {
info: `${COLORS.blue}[INFO]${COLORS.reset}`,
warn: `${COLORS.yellow}[WARN]${COLORS.reset}`,
error: `${COLORS.red}[ERROR]${COLORS.reset}`,
success: `${COLORS.green}[SUCCESS]${COLORS.reset}`
}[level.toLowerCase()];
console.log(prefix, ...args);
const prefix = {
info: `${COLORS.blue}[INFO]${COLORS.reset}`,
warn: `${COLORS.yellow}[WARN]${COLORS.reset}`,
error: `${COLORS.red}[ERROR]${COLORS.reset}`,
success: `${COLORS.green}[SUCCESS]${COLORS.reset}`
}[level.toLowerCase()];
console.log(prefix, ...args);
}
// Function to check if a file exists
function fileExists(filePath) {
return fs.existsSync(filePath);
return fs.existsSync(filePath);
}
// Function to ensure a file is executable
function ensureExecutable(filePath) {
try {
fs.chmodSync(filePath, '755');
log('info', `Made ${filePath} executable`);
} catch (error) {
log('error', `Failed to make ${filePath} executable:`, error.message);
return false;
}
return true;
try {
fs.chmodSync(filePath, '755');
log('info', `Made ${filePath} executable`);
} catch (error) {
log('error', `Failed to make ${filePath} executable:`, error.message);
return false;
}
return true;
}
// Function to sync template files
function syncTemplateFiles() {
// We no longer need to sync files since we're using them directly
log('info', 'Template syncing has been deprecated - using source files directly');
return true;
// We no longer need to sync files since we're using them directly
log(
'info',
'Template syncing has been deprecated - using source files directly'
);
return true;
}
// Function to increment version
function incrementVersion(currentVersion, type = 'patch') {
const [major, minor, patch] = currentVersion.split('.').map(Number);
switch (type) {
case 'major':
return `${major + 1}.0.0`;
case 'minor':
return `${major}.${minor + 1}.0`;
case 'patch':
default:
return `${major}.${minor}.${patch + 1}`;
}
const [major, minor, patch] = currentVersion.split('.').map(Number);
switch (type) {
case 'major':
return `${major + 1}.0.0`;
case 'minor':
return `${major}.${minor + 1}.0`;
case 'patch':
default:
return `${major}.${minor}.${patch + 1}`;
}
}
// Main function to prepare the package
function preparePackage() {
const rootDir = path.join(__dirname, '..');
log('info', `Preparing package in ${rootDir}`);
// Update version in package.json
const packageJsonPath = path.join(rootDir, 'package.json');
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
const currentVersion = packageJson.version;
let newVersion;
if (explicitVersion) {
newVersion = explicitVersion;
log('info', `Setting version to specified ${newVersion} (was ${currentVersion})`);
} else {
newVersion = incrementVersion(currentVersion, versionBump);
log('info', `Incrementing ${versionBump} version to ${newVersion} (was ${currentVersion})`);
}
packageJson.version = newVersion;
fs.writeFileSync(packageJsonPath, JSON.stringify(packageJson, null, 2));
log('success', `Updated package.json version to ${newVersion}`);
// Check for required files
const requiredFiles = [
'package.json',
'README-task-master.md',
'index.js',
'scripts/init.js',
'scripts/dev.js',
'assets/env.example',
'assets/gitignore',
'assets/example_prd.txt',
'assets/scripts_README.md',
'.cursor/rules/dev_workflow.mdc',
'.cursor/rules/cursor_rules.mdc',
'.cursor/rules/self_improve.mdc'
];
let allFilesExist = true;
for (const file of requiredFiles) {
const filePath = path.join(rootDir, file);
if (!fileExists(filePath)) {
log('error', `Required file ${file} does not exist`);
allFilesExist = false;
}
}
if (!allFilesExist) {
log('error', 'Some required files are missing. Package preparation failed.');
process.exit(1);
}
// Ensure scripts are executable
const executableScripts = [
'scripts/init.js',
'scripts/dev.js'
];
let allScriptsExecutable = true;
for (const script of executableScripts) {
const scriptPath = path.join(rootDir, script);
if (!ensureExecutable(scriptPath)) {
allScriptsExecutable = false;
}
}
if (!allScriptsExecutable) {
log('warn', 'Some scripts could not be made executable. This may cause issues.');
}
// Run npm pack to test package creation
try {
log('info', 'Running npm pack to test package creation...');
const output = execSync('npm pack --dry-run', { cwd: rootDir }).toString();
log('info', output);
} catch (error) {
log('error', 'Failed to run npm pack:', error.message);
process.exit(1);
}
// Make scripts executable
log('info', 'Making scripts executable...');
try {
execSync('chmod +x scripts/init.js', { stdio: 'ignore' });
log('info', 'Made scripts/init.js executable');
execSync('chmod +x scripts/dev.js', { stdio: 'ignore' });
log('info', 'Made scripts/dev.js executable');
} catch (error) {
log('error', 'Failed to make scripts executable:', error.message);
}
log('success', `Package preparation completed successfully! 🎉`);
log('success', `Version updated to ${newVersion}`);
log('info', 'You can now publish the package with:');
log('info', ' npm publish');
const rootDir = path.join(__dirname, '..');
log('info', `Preparing package in ${rootDir}`);
// Update version in package.json
const packageJsonPath = path.join(rootDir, 'package.json');
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
const currentVersion = packageJson.version;
let newVersion;
if (explicitVersion) {
newVersion = explicitVersion;
log(
'info',
`Setting version to specified ${newVersion} (was ${currentVersion})`
);
} else {
newVersion = incrementVersion(currentVersion, versionBump);
log(
'info',
`Incrementing ${versionBump} version to ${newVersion} (was ${currentVersion})`
);
}
packageJson.version = newVersion;
fs.writeFileSync(packageJsonPath, JSON.stringify(packageJson, null, 2));
log('success', `Updated package.json version to ${newVersion}`);
// Check for required files
const requiredFiles = [
'package.json',
'README-task-master.md',
'index.js',
'scripts/init.js',
'scripts/dev.js',
'assets/env.example',
'assets/gitignore',
'assets/example_prd.txt',
'assets/scripts_README.md',
'.cursor/rules/dev_workflow.mdc',
'.cursor/rules/cursor_rules.mdc',
'.cursor/rules/self_improve.mdc'
];
let allFilesExist = true;
for (const file of requiredFiles) {
const filePath = path.join(rootDir, file);
if (!fileExists(filePath)) {
log('error', `Required file ${file} does not exist`);
allFilesExist = false;
}
}
if (!allFilesExist) {
log(
'error',
'Some required files are missing. Package preparation failed.'
);
process.exit(1);
}
// Ensure scripts are executable
const executableScripts = ['scripts/init.js', 'scripts/dev.js'];
let allScriptsExecutable = true;
for (const script of executableScripts) {
const scriptPath = path.join(rootDir, script);
if (!ensureExecutable(scriptPath)) {
allScriptsExecutable = false;
}
}
if (!allScriptsExecutable) {
log(
'warn',
'Some scripts could not be made executable. This may cause issues.'
);
}
// Run npm pack to test package creation
try {
log('info', 'Running npm pack to test package creation...');
const output = execSync('npm pack --dry-run', { cwd: rootDir }).toString();
log('info', output);
} catch (error) {
log('error', 'Failed to run npm pack:', error.message);
process.exit(1);
}
// Make scripts executable
log('info', 'Making scripts executable...');
try {
execSync('chmod +x scripts/init.js', { stdio: 'ignore' });
log('info', 'Made scripts/init.js executable');
execSync('chmod +x scripts/dev.js', { stdio: 'ignore' });
log('info', 'Made scripts/dev.js executable');
} catch (error) {
log('error', 'Failed to make scripts executable:', error.message);
}
log('success', `Package preparation completed successfully! 🎉`);
log('success', `Version updated to ${newVersion}`);
log('info', 'You can now publish the package with:');
log('info', ' npm publish');
}
// Run the preparation
preparePackage();
preparePackage();

View File

@@ -1,203 +1,203 @@
{
"meta": {
"generatedAt": "2025-03-24T20:01:35.986Z",
"tasksAnalyzed": 24,
"thresholdScore": 5,
"projectName": "Your Project Name",
"usedResearch": false
},
"complexityAnalysis": [
{
"taskId": 1,
"taskTitle": "Implement Task Data Structure",
"complexityScore": 7,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the implementation of the core tasks.json data structure into subtasks that cover schema design, model implementation, validation, file operations, and error handling. For each subtask, include specific technical requirements and acceptance criteria.",
"reasoning": "This task requires designing a foundational data structure that will be used throughout the system. It involves schema design, validation logic, and file system operations, which together represent moderate to high complexity. The task is critical as many other tasks depend on it."
},
{
"taskId": 2,
"taskTitle": "Develop Command Line Interface Foundation",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the CLI foundation implementation into subtasks covering Commander.js setup, help documentation creation, console output formatting, and global options handling. Each subtask should specify implementation details and how it integrates with the overall CLI structure.",
"reasoning": "Setting up the CLI foundation requires integrating Commander.js, implementing various command-line options, and establishing the output formatting system. The complexity is moderate as it involves creating the interface layer that users will interact with."
},
{
"taskId": 3,
"taskTitle": "Implement Basic Task Operations",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the implementation of basic task operations into subtasks covering CRUD operations, status management, dependency handling, and priority management. Each subtask should detail the specific operations, validation requirements, and error cases to handle.",
"reasoning": "This task encompasses multiple operations (create, read, update, delete) along with status changes, dependency management, and priority handling. It represents high complexity due to the breadth of functionality and the need to ensure data integrity across operations."
},
{
"taskId": 4,
"taskTitle": "Create Task File Generation System",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the task file generation system into subtasks covering template creation, file generation logic, bi-directional synchronization, and file organization. Each subtask should specify the technical approach, edge cases to handle, and integration points with the task data structure.",
"reasoning": "Implementing file generation with bi-directional synchronization presents significant complexity due to the need to maintain consistency between individual files and the central tasks.json. The system must handle updates in either direction and resolve potential conflicts."
},
{
"taskId": 5,
"taskTitle": "Integrate Anthropic Claude API",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the Claude API integration into subtasks covering authentication setup, prompt template creation, response handling, and error management with retries. Each subtask should detail the specific implementation approach, including security considerations and performance optimizations.",
"reasoning": "Integrating with the Claude API involves setting up authentication, creating effective prompts, and handling responses and errors. The complexity is moderate, focusing on establishing a reliable connection to the external service with proper error handling and retry logic."
},
{
"taskId": 6,
"taskTitle": "Build PRD Parsing System",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the PRD parsing system into subtasks covering file reading, prompt engineering, content-to-task conversion, dependency inference, priority assignment, and handling large documents. Each subtask should specify the AI interaction approach, data transformation steps, and validation requirements.",
"reasoning": "Parsing PRDs into structured tasks requires sophisticated prompt engineering and intelligent processing of unstructured text. The complexity is high due to the need to accurately extract tasks, infer dependencies, and handle potentially large documents with varying formats."
},
{
"taskId": 7,
"taskTitle": "Implement Task Expansion with Claude",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the task expansion functionality into subtasks covering prompt creation for subtask generation, expansion workflow implementation, parent-child relationship management, and regeneration mechanisms. Each subtask should detail the AI interaction patterns, data structures, and user experience considerations.",
"reasoning": "Task expansion involves complex AI interactions to generate meaningful subtasks and manage their relationships with parent tasks. The complexity comes from creating effective prompts that produce useful subtasks and implementing a smooth workflow for users to generate and refine these subtasks."
},
{
"taskId": 8,
"taskTitle": "Develop Implementation Drift Handling",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the implementation drift handling into subtasks covering change detection, task rewriting based on new context, dependency chain updates, work preservation, and update suggestion analysis. Each subtask should specify the algorithms, heuristics, and AI prompts needed to effectively manage implementation changes.",
"reasoning": "This task involves the complex challenge of updating future tasks based on changes in implementation. It requires sophisticated analysis of completed work, understanding how it affects pending tasks, and intelligently updating those tasks while preserving dependencies. This represents high complexity due to the need for context-aware AI reasoning."
},
{
"taskId": 9,
"taskTitle": "Integrate Perplexity API",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the Perplexity API integration into subtasks covering authentication setup, research-oriented prompt creation, response handling, and fallback mechanisms. Each subtask should detail the implementation approach, integration with existing systems, and quality comparison metrics.",
"reasoning": "Similar to the Claude integration but slightly less complex, this task focuses on connecting to the Perplexity API for research capabilities. The complexity is moderate, involving API authentication, prompt templates, and response handling with fallback mechanisms to Claude."
},
{
"taskId": 10,
"taskTitle": "Create Research-Backed Subtask Generation",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the research-backed subtask generation into subtasks covering domain-specific prompt creation, context enrichment from research, knowledge incorporation, and detailed subtask generation. Each subtask should specify the approach for leveraging research data and integrating it into the generation process.",
"reasoning": "This task builds on previous work to enhance subtask generation with research capabilities. The complexity comes from effectively incorporating research results into the generation process and creating domain-specific prompts that produce high-quality, detailed subtasks with best practices."
},
{
"taskId": 11,
"taskTitle": "Implement Batch Operations",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the batch operations functionality into subtasks covering multi-task status updates, bulk subtask generation, task filtering/querying, and batch prioritization. Each subtask should detail the command interface, implementation approach, and performance considerations for handling multiple tasks.",
"reasoning": "Implementing batch operations requires extending existing functionality to work with multiple tasks simultaneously. The complexity is moderate, focusing on efficient processing of task sets, filtering capabilities, and maintaining data consistency across bulk operations."
},
{
"taskId": 12,
"taskTitle": "Develop Project Initialization System",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the project initialization system into subtasks covering project templating, interactive setup wizard, environment configuration, directory structure creation, and example generation. Each subtask should specify the user interaction flow, template design, and integration with existing components.",
"reasoning": "Creating a project initialization system involves setting up templates, an interactive wizard, and generating initial files and directories. The complexity is moderate, focusing on providing a smooth setup experience for new projects with appropriate defaults and configuration."
},
{
"taskId": 13,
"taskTitle": "Create Cursor Rules Implementation",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the Cursor rules implementation into subtasks covering documentation creation (dev_workflow.mdc, cursor_rules.mdc, self_improve.mdc), directory structure setup, and integration documentation. Each subtask should detail the specific content to include and how it enables effective AI interaction.",
"reasoning": "This task focuses on creating documentation and rules for Cursor AI integration. The complexity is moderate, involving the creation of structured documentation files that define how AI should interact with the system and setting up the appropriate directory structure."
},
{
"taskId": 14,
"taskTitle": "Develop Agent Workflow Guidelines",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Divide the agent workflow guidelines into subtasks covering task discovery documentation, selection guidelines, implementation guidance, verification procedures, and prioritization rules. Each subtask should specify the specific guidance to provide and how it enables effective agent workflows.",
"reasoning": "Creating comprehensive guidelines for AI agents involves documenting workflows, selection criteria, and implementation guidance. The complexity is moderate, focusing on clear documentation that helps agents interact effectively with the task system."
},
{
"taskId": 15,
"taskTitle": "Optimize Agent Integration with Cursor and dev.js Commands",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the agent integration optimization into subtasks covering existing pattern documentation, Cursor-dev.js command integration enhancement, workflow documentation improvement, and feature additions. Each subtask should specify the specific improvements to make and how they enhance agent interaction.",
"reasoning": "This task involves enhancing and documenting existing agent interaction patterns with Cursor and dev.js commands. The complexity is moderate, focusing on improving integration between different components and ensuring agents can effectively utilize the system's capabilities."
},
{
"taskId": 16,
"taskTitle": "Create Configuration Management System",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the configuration management system into subtasks covering environment variable handling, .env file support, configuration validation, defaults with overrides, and secure API key handling. Each subtask should specify the implementation approach, security considerations, and user experience for configuration.",
"reasoning": "Implementing robust configuration management involves handling environment variables, .env files, validation, and secure storage of sensitive information. The complexity is moderate, focusing on creating a flexible system that works across different environments with appropriate security measures."
},
{
"taskId": 17,
"taskTitle": "Implement Comprehensive Logging System",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the logging system implementation into subtasks covering log level configuration, output destination management, specialized logging (commands, APIs, errors), and performance metrics. Each subtask should detail the implementation approach, configuration options, and integration with existing components.",
"reasoning": "Creating a comprehensive logging system involves implementing multiple log levels, configurable destinations, and specialized logging for different components. The complexity is moderate, focusing on providing useful information for debugging and monitoring while maintaining performance."
},
{
"taskId": 18,
"taskTitle": "Create Comprehensive User Documentation",
"complexityScore": 7,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the user documentation creation into subtasks covering README with installation instructions, command reference, configuration guide, example workflows, troubleshooting guides, and advanced usage. Each subtask should specify the content to include, format, and organization to ensure comprehensive coverage.",
"reasoning": "Creating comprehensive documentation requires covering installation, usage, configuration, examples, and troubleshooting across multiple components. The complexity is moderate to high due to the breadth of functionality to document and the need to make it accessible to different user levels."
},
{
"taskId": 19,
"taskTitle": "Implement Error Handling and Recovery",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the error handling implementation into subtasks covering consistent error formatting, helpful error messages, API error handling with retries, file system error recovery, validation errors, and system state recovery. Each subtask should detail the specific error types to handle, recovery strategies, and user communication approach.",
"reasoning": "Implementing robust error handling across the entire system represents high complexity due to the variety of error types, the need for meaningful messages, and the implementation of recovery mechanisms. This task is critical for system reliability and user experience."
},
{
"taskId": 20,
"taskTitle": "Create Token Usage Tracking and Cost Management",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the token tracking and cost management into subtasks covering usage tracking implementation, configurable limits, reporting features, cost estimation, caching for optimization, and usage alerts. Each subtask should specify the implementation approach, data storage, and user interface for monitoring and managing usage.",
"reasoning": "Implementing token usage tracking involves monitoring API calls, calculating costs, implementing limits, and optimizing usage through caching. The complexity is moderate to high, focusing on providing users with visibility into their API consumption and tools to manage costs."
},
{
"taskId": 21,
"taskTitle": "Refactor dev.js into Modular Components",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the refactoring of dev.js into subtasks covering module design (commands.js, ai-services.js, task-manager.js, ui.js, utils.js), entry point restructuring, dependency management, error handling standardization, and documentation. Each subtask should detail the specific code to extract, interfaces to define, and integration points between modules.",
"reasoning": "Refactoring a monolithic file into modular components represents high complexity due to the need to identify appropriate boundaries, manage dependencies between modules, and ensure all functionality is preserved. This requires deep understanding of the existing codebase and careful restructuring."
},
{
"taskId": 22,
"taskTitle": "Create Comprehensive Test Suite for Task Master CLI",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the test suite creation into subtasks covering unit test implementation, integration test development, end-to-end test creation, mocking setup, and CI integration. Each subtask should specify the testing approach, coverage goals, test data preparation, and specific functionality to test.",
"reasoning": "Developing a comprehensive test suite represents high complexity due to the need to cover unit, integration, and end-to-end tests across all functionality, implement appropriate mocking, and ensure good test coverage. This requires significant test engineering and understanding of the entire system."
},
{
"taskId": 23,
"taskTitle": "Implement MCP (Model Context Protocol) Server Functionality for Task Master",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the MCP server implementation into subtasks covering core server module creation, endpoint implementation (/context, /models, /execute), context management system, authentication mechanisms, and performance optimization. Each subtask should detail the API design, data structures, and integration with existing Task Master functionality.",
"reasoning": "Implementing an MCP server represents high complexity due to the need to create a RESTful API with multiple endpoints, manage context data efficiently, handle authentication, and ensure compatibility with the MCP specification. This requires significant API design and server-side development work."
},
{
"taskId": 24,
"taskTitle": "Implement AI-Powered Test Generation Command",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the test generation command implementation into subtasks covering command structure and parameter handling, task analysis logic, AI prompt construction, and test file generation. Each subtask should specify the implementation approach, AI interaction pattern, and output formatting requirements.",
"reasoning": "Creating an AI-powered test generation command involves analyzing tasks, constructing effective prompts, and generating well-formatted test files. The complexity is moderate to high, focusing on leveraging AI to produce useful tests based on task descriptions and subtasks."
}
]
}
"meta": {
"generatedAt": "2025-03-24T20:01:35.986Z",
"tasksAnalyzed": 24,
"thresholdScore": 5,
"projectName": "Your Project Name",
"usedResearch": false
},
"complexityAnalysis": [
{
"taskId": 1,
"taskTitle": "Implement Task Data Structure",
"complexityScore": 7,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the implementation of the core tasks.json data structure into subtasks that cover schema design, model implementation, validation, file operations, and error handling. For each subtask, include specific technical requirements and acceptance criteria.",
"reasoning": "This task requires designing a foundational data structure that will be used throughout the system. It involves schema design, validation logic, and file system operations, which together represent moderate to high complexity. The task is critical as many other tasks depend on it."
},
{
"taskId": 2,
"taskTitle": "Develop Command Line Interface Foundation",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the CLI foundation implementation into subtasks covering Commander.js setup, help documentation creation, console output formatting, and global options handling. Each subtask should specify implementation details and how it integrates with the overall CLI structure.",
"reasoning": "Setting up the CLI foundation requires integrating Commander.js, implementing various command-line options, and establishing the output formatting system. The complexity is moderate as it involves creating the interface layer that users will interact with."
},
{
"taskId": 3,
"taskTitle": "Implement Basic Task Operations",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the implementation of basic task operations into subtasks covering CRUD operations, status management, dependency handling, and priority management. Each subtask should detail the specific operations, validation requirements, and error cases to handle.",
"reasoning": "This task encompasses multiple operations (create, read, update, delete) along with status changes, dependency management, and priority handling. It represents high complexity due to the breadth of functionality and the need to ensure data integrity across operations."
},
{
"taskId": 4,
"taskTitle": "Create Task File Generation System",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the task file generation system into subtasks covering template creation, file generation logic, bi-directional synchronization, and file organization. Each subtask should specify the technical approach, edge cases to handle, and integration points with the task data structure.",
"reasoning": "Implementing file generation with bi-directional synchronization presents significant complexity due to the need to maintain consistency between individual files and the central tasks.json. The system must handle updates in either direction and resolve potential conflicts."
},
{
"taskId": 5,
"taskTitle": "Integrate Anthropic Claude API",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the Claude API integration into subtasks covering authentication setup, prompt template creation, response handling, and error management with retries. Each subtask should detail the specific implementation approach, including security considerations and performance optimizations.",
"reasoning": "Integrating with the Claude API involves setting up authentication, creating effective prompts, and handling responses and errors. The complexity is moderate, focusing on establishing a reliable connection to the external service with proper error handling and retry logic."
},
{
"taskId": 6,
"taskTitle": "Build PRD Parsing System",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the PRD parsing system into subtasks covering file reading, prompt engineering, content-to-task conversion, dependency inference, priority assignment, and handling large documents. Each subtask should specify the AI interaction approach, data transformation steps, and validation requirements.",
"reasoning": "Parsing PRDs into structured tasks requires sophisticated prompt engineering and intelligent processing of unstructured text. The complexity is high due to the need to accurately extract tasks, infer dependencies, and handle potentially large documents with varying formats."
},
{
"taskId": 7,
"taskTitle": "Implement Task Expansion with Claude",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the task expansion functionality into subtasks covering prompt creation for subtask generation, expansion workflow implementation, parent-child relationship management, and regeneration mechanisms. Each subtask should detail the AI interaction patterns, data structures, and user experience considerations.",
"reasoning": "Task expansion involves complex AI interactions to generate meaningful subtasks and manage their relationships with parent tasks. The complexity comes from creating effective prompts that produce useful subtasks and implementing a smooth workflow for users to generate and refine these subtasks."
},
{
"taskId": 8,
"taskTitle": "Develop Implementation Drift Handling",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the implementation drift handling into subtasks covering change detection, task rewriting based on new context, dependency chain updates, work preservation, and update suggestion analysis. Each subtask should specify the algorithms, heuristics, and AI prompts needed to effectively manage implementation changes.",
"reasoning": "This task involves the complex challenge of updating future tasks based on changes in implementation. It requires sophisticated analysis of completed work, understanding how it affects pending tasks, and intelligently updating those tasks while preserving dependencies. This represents high complexity due to the need for context-aware AI reasoning."
},
{
"taskId": 9,
"taskTitle": "Integrate Perplexity API",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the Perplexity API integration into subtasks covering authentication setup, research-oriented prompt creation, response handling, and fallback mechanisms. Each subtask should detail the implementation approach, integration with existing systems, and quality comparison metrics.",
"reasoning": "Similar to the Claude integration but slightly less complex, this task focuses on connecting to the Perplexity API for research capabilities. The complexity is moderate, involving API authentication, prompt templates, and response handling with fallback mechanisms to Claude."
},
{
"taskId": 10,
"taskTitle": "Create Research-Backed Subtask Generation",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the research-backed subtask generation into subtasks covering domain-specific prompt creation, context enrichment from research, knowledge incorporation, and detailed subtask generation. Each subtask should specify the approach for leveraging research data and integrating it into the generation process.",
"reasoning": "This task builds on previous work to enhance subtask generation with research capabilities. The complexity comes from effectively incorporating research results into the generation process and creating domain-specific prompts that produce high-quality, detailed subtasks with best practices."
},
{
"taskId": 11,
"taskTitle": "Implement Batch Operations",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the batch operations functionality into subtasks covering multi-task status updates, bulk subtask generation, task filtering/querying, and batch prioritization. Each subtask should detail the command interface, implementation approach, and performance considerations for handling multiple tasks.",
"reasoning": "Implementing batch operations requires extending existing functionality to work with multiple tasks simultaneously. The complexity is moderate, focusing on efficient processing of task sets, filtering capabilities, and maintaining data consistency across bulk operations."
},
{
"taskId": 12,
"taskTitle": "Develop Project Initialization System",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the project initialization system into subtasks covering project templating, interactive setup wizard, environment configuration, directory structure creation, and example generation. Each subtask should specify the user interaction flow, template design, and integration with existing components.",
"reasoning": "Creating a project initialization system involves setting up templates, an interactive wizard, and generating initial files and directories. The complexity is moderate, focusing on providing a smooth setup experience for new projects with appropriate defaults and configuration."
},
{
"taskId": 13,
"taskTitle": "Create Cursor Rules Implementation",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the Cursor rules implementation into subtasks covering documentation creation (dev_workflow.mdc, cursor_rules.mdc, self_improve.mdc), directory structure setup, and integration documentation. Each subtask should detail the specific content to include and how it enables effective AI interaction.",
"reasoning": "This task focuses on creating documentation and rules for Cursor AI integration. The complexity is moderate, involving the creation of structured documentation files that define how AI should interact with the system and setting up the appropriate directory structure."
},
{
"taskId": 14,
"taskTitle": "Develop Agent Workflow Guidelines",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Divide the agent workflow guidelines into subtasks covering task discovery documentation, selection guidelines, implementation guidance, verification procedures, and prioritization rules. Each subtask should specify the specific guidance to provide and how it enables effective agent workflows.",
"reasoning": "Creating comprehensive guidelines for AI agents involves documenting workflows, selection criteria, and implementation guidance. The complexity is moderate, focusing on clear documentation that helps agents interact effectively with the task system."
},
{
"taskId": 15,
"taskTitle": "Optimize Agent Integration with Cursor and dev.js Commands",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the agent integration optimization into subtasks covering existing pattern documentation, Cursor-dev.js command integration enhancement, workflow documentation improvement, and feature additions. Each subtask should specify the specific improvements to make and how they enhance agent interaction.",
"reasoning": "This task involves enhancing and documenting existing agent interaction patterns with Cursor and dev.js commands. The complexity is moderate, focusing on improving integration between different components and ensuring agents can effectively utilize the system's capabilities."
},
{
"taskId": 16,
"taskTitle": "Create Configuration Management System",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the configuration management system into subtasks covering environment variable handling, .env file support, configuration validation, defaults with overrides, and secure API key handling. Each subtask should specify the implementation approach, security considerations, and user experience for configuration.",
"reasoning": "Implementing robust configuration management involves handling environment variables, .env files, validation, and secure storage of sensitive information. The complexity is moderate, focusing on creating a flexible system that works across different environments with appropriate security measures."
},
{
"taskId": 17,
"taskTitle": "Implement Comprehensive Logging System",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the logging system implementation into subtasks covering log level configuration, output destination management, specialized logging (commands, APIs, errors), and performance metrics. Each subtask should detail the implementation approach, configuration options, and integration with existing components.",
"reasoning": "Creating a comprehensive logging system involves implementing multiple log levels, configurable destinations, and specialized logging for different components. The complexity is moderate, focusing on providing useful information for debugging and monitoring while maintaining performance."
},
{
"taskId": 18,
"taskTitle": "Create Comprehensive User Documentation",
"complexityScore": 7,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the user documentation creation into subtasks covering README with installation instructions, command reference, configuration guide, example workflows, troubleshooting guides, and advanced usage. Each subtask should specify the content to include, format, and organization to ensure comprehensive coverage.",
"reasoning": "Creating comprehensive documentation requires covering installation, usage, configuration, examples, and troubleshooting across multiple components. The complexity is moderate to high due to the breadth of functionality to document and the need to make it accessible to different user levels."
},
{
"taskId": 19,
"taskTitle": "Implement Error Handling and Recovery",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the error handling implementation into subtasks covering consistent error formatting, helpful error messages, API error handling with retries, file system error recovery, validation errors, and system state recovery. Each subtask should detail the specific error types to handle, recovery strategies, and user communication approach.",
"reasoning": "Implementing robust error handling across the entire system represents high complexity due to the variety of error types, the need for meaningful messages, and the implementation of recovery mechanisms. This task is critical for system reliability and user experience."
},
{
"taskId": 20,
"taskTitle": "Create Token Usage Tracking and Cost Management",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the token tracking and cost management into subtasks covering usage tracking implementation, configurable limits, reporting features, cost estimation, caching for optimization, and usage alerts. Each subtask should specify the implementation approach, data storage, and user interface for monitoring and managing usage.",
"reasoning": "Implementing token usage tracking involves monitoring API calls, calculating costs, implementing limits, and optimizing usage through caching. The complexity is moderate to high, focusing on providing users with visibility into their API consumption and tools to manage costs."
},
{
"taskId": 21,
"taskTitle": "Refactor dev.js into Modular Components",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the refactoring of dev.js into subtasks covering module design (commands.js, ai-services.js, task-manager.js, ui.js, utils.js), entry point restructuring, dependency management, error handling standardization, and documentation. Each subtask should detail the specific code to extract, interfaces to define, and integration points between modules.",
"reasoning": "Refactoring a monolithic file into modular components represents high complexity due to the need to identify appropriate boundaries, manage dependencies between modules, and ensure all functionality is preserved. This requires deep understanding of the existing codebase and careful restructuring."
},
{
"taskId": 22,
"taskTitle": "Create Comprehensive Test Suite for Task Master CLI",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the test suite creation into subtasks covering unit test implementation, integration test development, end-to-end test creation, mocking setup, and CI integration. Each subtask should specify the testing approach, coverage goals, test data preparation, and specific functionality to test.",
"reasoning": "Developing a comprehensive test suite represents high complexity due to the need to cover unit, integration, and end-to-end tests across all functionality, implement appropriate mocking, and ensure good test coverage. This requires significant test engineering and understanding of the entire system."
},
{
"taskId": 23,
"taskTitle": "Implement MCP (Model Context Protocol) Server Functionality for Task Master",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the MCP server implementation into subtasks covering core server module creation, endpoint implementation (/context, /models, /execute), context management system, authentication mechanisms, and performance optimization. Each subtask should detail the API design, data structures, and integration with existing Task Master functionality.",
"reasoning": "Implementing an MCP server represents high complexity due to the need to create a RESTful API with multiple endpoints, manage context data efficiently, handle authentication, and ensure compatibility with the MCP specification. This requires significant API design and server-side development work."
},
{
"taskId": 24,
"taskTitle": "Implement AI-Powered Test Generation Command",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the test generation command implementation into subtasks covering command structure and parameter handling, task analysis logic, AI prompt construction, and test file generation. Each subtask should specify the implementation approach, AI interaction pattern, and output formatting requirements.",
"reasoning": "Creating an AI-powered test generation command involves analyzing tasks, constructing effective prompts, and generating well-formatted test files. The complexity is moderate to high, focusing on leveraging AI to produce useful tests based on task descriptions and subtasks."
}
]
}

View File

@@ -2,7 +2,7 @@
/**
* test-claude-errors.js
*
*
* A test script to verify the error handling and retry logic in the callClaude function.
* This script creates a modified version of dev.js that simulates different error scenarios.
*/
@@ -22,7 +22,7 @@ dotenv.config();
// Create a simple PRD for testing
const createTestPRD = () => {
return `# Test PRD for Error Handling
return `# Test PRD for Error Handling
## Overview
This is a simple test PRD to verify the error handling in the callClaude function.
@@ -36,21 +36,22 @@ This is a simple test PRD to verify the error handling in the callClaude functio
// Create a modified version of dev.js that simulates errors
function createErrorSimulationScript(errorType, failureCount = 2) {
// Read the original dev.js file
const devJsPath = path.join(__dirname, 'dev.js');
const devJsContent = fs.readFileSync(devJsPath, 'utf8');
// Create a modified version that simulates errors
let modifiedContent = devJsContent;
// Find the anthropic.messages.create call and replace it with our mock
const anthropicCallRegex = /const response = await anthropic\.messages\.create\(/;
let mockCode = '';
switch (errorType) {
case 'network':
mockCode = `
// Read the original dev.js file
const devJsPath = path.join(__dirname, 'dev.js');
const devJsContent = fs.readFileSync(devJsPath, 'utf8');
// Create a modified version that simulates errors
let modifiedContent = devJsContent;
// Find the anthropic.messages.create call and replace it with our mock
const anthropicCallRegex =
/const response = await anthropic\.messages\.create\(/;
let mockCode = '';
switch (errorType) {
case 'network':
mockCode = `
// Mock for network error simulation
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -65,10 +66,10 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}
const response = await anthropic.messages.create(`;
break;
case 'timeout':
mockCode = `
break;
case 'timeout':
mockCode = `
// Mock for timeout error simulation
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -83,10 +84,10 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}
const response = await anthropic.messages.create(`;
break;
case 'invalid-json':
mockCode = `
break;
case 'invalid-json':
mockCode = `
// Mock for invalid JSON response
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -107,10 +108,10 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}
const response = await anthropic.messages.create(`;
break;
case 'empty-tasks':
mockCode = `
break;
case 'empty-tasks':
mockCode = `
// Mock for empty tasks array
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -131,82 +132,87 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}
const response = await anthropic.messages.create(`;
break;
default:
// No modification
mockCode = `const response = await anthropic.messages.create(`;
}
// Replace the anthropic call with our mock
modifiedContent = modifiedContent.replace(anthropicCallRegex, mockCode);
// Write the modified script to a temporary file
const tempScriptPath = path.join(__dirname, `temp-dev-${errorType}.js`);
fs.writeFileSync(tempScriptPath, modifiedContent, 'utf8');
return tempScriptPath;
break;
default:
// No modification
mockCode = `const response = await anthropic.messages.create(`;
}
// Replace the anthropic call with our mock
modifiedContent = modifiedContent.replace(anthropicCallRegex, mockCode);
// Write the modified script to a temporary file
const tempScriptPath = path.join(__dirname, `temp-dev-${errorType}.js`);
fs.writeFileSync(tempScriptPath, modifiedContent, 'utf8');
return tempScriptPath;
}
// Function to run a test with a specific error type
async function runErrorTest(errorType, numTasks = 5, failureCount = 2) {
console.log(`\n=== Test: ${errorType.toUpperCase()} Error Simulation ===`);
// Create a test PRD
const testPRD = createTestPRD();
const testPRDPath = path.join(__dirname, `test-prd-${errorType}.txt`);
fs.writeFileSync(testPRDPath, testPRD, 'utf8');
// Create a modified dev.js that simulates the specified error
const tempScriptPath = createErrorSimulationScript(errorType, failureCount);
console.log(`Created test PRD at ${testPRDPath}`);
console.log(`Created error simulation script at ${tempScriptPath}`);
console.log(`Running with error type: ${errorType}, failure count: ${failureCount}, tasks: ${numTasks}`);
try {
// Run the modified script
execSync(`node ${tempScriptPath} parse-prd --input=${testPRDPath} --tasks=${numTasks}`, {
stdio: 'inherit'
});
console.log(`${errorType} error test completed successfully`);
} catch (error) {
console.error(`${errorType} error test failed:`, error.message);
} finally {
// Clean up temporary files
if (fs.existsSync(tempScriptPath)) {
fs.unlinkSync(tempScriptPath);
}
if (fs.existsSync(testPRDPath)) {
fs.unlinkSync(testPRDPath);
}
}
console.log(`\n=== Test: ${errorType.toUpperCase()} Error Simulation ===`);
// Create a test PRD
const testPRD = createTestPRD();
const testPRDPath = path.join(__dirname, `test-prd-${errorType}.txt`);
fs.writeFileSync(testPRDPath, testPRD, 'utf8');
// Create a modified dev.js that simulates the specified error
const tempScriptPath = createErrorSimulationScript(errorType, failureCount);
console.log(`Created test PRD at ${testPRDPath}`);
console.log(`Created error simulation script at ${tempScriptPath}`);
console.log(
`Running with error type: ${errorType}, failure count: ${failureCount}, tasks: ${numTasks}`
);
try {
// Run the modified script
execSync(
`node ${tempScriptPath} parse-prd --input=${testPRDPath} --tasks=${numTasks}`,
{
stdio: 'inherit'
}
);
console.log(`${errorType} error test completed successfully`);
} catch (error) {
console.error(`${errorType} error test failed:`, error.message);
} finally {
// Clean up temporary files
if (fs.existsSync(tempScriptPath)) {
fs.unlinkSync(tempScriptPath);
}
if (fs.existsSync(testPRDPath)) {
fs.unlinkSync(testPRDPath);
}
}
}
// Function to run all error tests
async function runAllErrorTests() {
console.log('Starting error handling tests for callClaude function...');
// Test 1: Network error with automatic retry
await runErrorTest('network', 5, 2);
// Test 2: Timeout error with automatic retry
await runErrorTest('timeout', 5, 2);
// Test 3: Invalid JSON response with task reduction
await runErrorTest('invalid-json', 10, 2);
// Test 4: Empty tasks array with task reduction
await runErrorTest('empty-tasks', 15, 2);
// Test 5: Exhausted retries (more failures than MAX_RETRIES)
await runErrorTest('network', 5, 4);
console.log('\nAll error tests completed!');
console.log('Starting error handling tests for callClaude function...');
// Test 1: Network error with automatic retry
await runErrorTest('network', 5, 2);
// Test 2: Timeout error with automatic retry
await runErrorTest('timeout', 5, 2);
// Test 3: Invalid JSON response with task reduction
await runErrorTest('invalid-json', 10, 2);
// Test 4: Empty tasks array with task reduction
await runErrorTest('empty-tasks', 15, 2);
// Test 5: Exhausted retries (more failures than MAX_RETRIES)
await runErrorTest('network', 5, 4);
console.log('\nAll error tests completed!');
}
// Run the tests
runAllErrorTests().catch(error => {
console.error('Error running tests:', error);
process.exit(1);
});
runAllErrorTests().catch((error) => {
console.error('Error running tests:', error);
process.exit(1);
});

View File

@@ -2,7 +2,7 @@
/**
* test-claude.js
*
*
* A simple test script to verify the improvements to the callClaude function.
* This script tests different scenarios:
* 1. Normal operation with a small PRD
@@ -24,11 +24,11 @@ dotenv.config();
// Create a simple PRD for testing
const createTestPRD = (size = 'small', taskComplexity = 'simple') => {
let content = `# Test PRD - ${size.toUpperCase()} SIZE, ${taskComplexity.toUpperCase()} COMPLEXITY\n\n`;
// Add more content based on size
if (size === 'small') {
content += `
let content = `# Test PRD - ${size.toUpperCase()} SIZE, ${taskComplexity.toUpperCase()} COMPLEXITY\n\n`;
// Add more content based on size
if (size === 'small') {
content += `
## Overview
This is a small test PRD to verify the callClaude function improvements.
@@ -44,9 +44,9 @@ This is a small test PRD to verify the callClaude function improvements.
- Backend: Node.js
- Database: MongoDB
`;
} else if (size === 'medium') {
// Medium-sized PRD with more requirements
content += `
} else if (size === 'medium') {
// Medium-sized PRD with more requirements
content += `
## Overview
This is a medium-sized test PRD to verify the callClaude function improvements.
@@ -76,20 +76,20 @@ This is a medium-sized test PRD to verify the callClaude function improvements.
- CI/CD: GitHub Actions
- Monitoring: Prometheus and Grafana
`;
} else if (size === 'large') {
// Large PRD with many requirements
content += `
} else if (size === 'large') {
// Large PRD with many requirements
content += `
## Overview
This is a large test PRD to verify the callClaude function improvements.
## Requirements
`;
// Generate 30 requirements
for (let i = 1; i <= 30; i++) {
content += `${i}. Requirement ${i} - This is a detailed description of requirement ${i}.\n`;
}
content += `
// Generate 30 requirements
for (let i = 1; i <= 30; i++) {
content += `${i}. Requirement ${i} - This is a detailed description of requirement ${i}.\n`;
}
content += `
## Technical Stack
- Frontend: React with TypeScript
- Backend: Node.js with Express
@@ -101,12 +101,12 @@ This is a large test PRD to verify the callClaude function improvements.
## User Stories
`;
// Generate 20 user stories
for (let i = 1; i <= 20; i++) {
content += `- As a user, I want to be able to ${i} so that I can achieve benefit ${i}.\n`;
}
content += `
// Generate 20 user stories
for (let i = 1; i <= 20; i++) {
content += `- As a user, I want to be able to ${i} so that I can achieve benefit ${i}.\n`;
}
content += `
## Non-Functional Requirements
- Performance: The system should respond within 200ms
- Scalability: The system should handle 10,000 concurrent users
@@ -114,11 +114,11 @@ This is a large test PRD to verify the callClaude function improvements.
- Security: The system should comply with OWASP top 10
- Accessibility: The system should comply with WCAG 2.1 AA
`;
}
// Add complexity if needed
if (taskComplexity === 'complex') {
content += `
}
// Add complexity if needed
if (taskComplexity === 'complex') {
content += `
## Complex Requirements
- Implement a real-time collaboration system
- Add a machine learning-based recommendation engine
@@ -131,101 +131,110 @@ This is a large test PRD to verify the callClaude function improvements.
- Implement a custom reporting system
- Add a custom dashboard builder
`;
}
return content;
}
return content;
};
// Function to run the tests
async function runTests() {
console.log('Starting tests for callClaude function improvements...');
try {
// Instead of importing the callClaude function directly, we'll use the dev.js script
// with our test PRDs by running it as a child process
// Test 1: Small PRD, 5 tasks
console.log('\n=== Test 1: Small PRD, 5 tasks ===');
const smallPRD = createTestPRD('small', 'simple');
const smallPRDPath = path.join(__dirname, 'test-small-prd.txt');
fs.writeFileSync(smallPRDPath, smallPRD, 'utf8');
console.log(`Created test PRD at ${smallPRDPath}`);
console.log('Running dev.js with small PRD...');
// Use the child_process module to run the dev.js script
const { execSync } = await import('child_process');
try {
const smallResult = execSync(`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${smallPRDPath} --tasks=5`, {
stdio: 'inherit'
});
console.log('Small PRD test completed successfully');
} catch (error) {
console.error('Small PRD test failed:', error.message);
}
// Test 2: Medium PRD, 15 tasks
console.log('\n=== Test 2: Medium PRD, 15 tasks ===');
const mediumPRD = createTestPRD('medium', 'simple');
const mediumPRDPath = path.join(__dirname, 'test-medium-prd.txt');
fs.writeFileSync(mediumPRDPath, mediumPRD, 'utf8');
console.log(`Created test PRD at ${mediumPRDPath}`);
console.log('Running dev.js with medium PRD...');
try {
const mediumResult = execSync(`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${mediumPRDPath} --tasks=15`, {
stdio: 'inherit'
});
console.log('Medium PRD test completed successfully');
} catch (error) {
console.error('Medium PRD test failed:', error.message);
}
// Test 3: Large PRD, 25 tasks
console.log('\n=== Test 3: Large PRD, 25 tasks ===');
const largePRD = createTestPRD('large', 'complex');
const largePRDPath = path.join(__dirname, 'test-large-prd.txt');
fs.writeFileSync(largePRDPath, largePRD, 'utf8');
console.log(`Created test PRD at ${largePRDPath}`);
console.log('Running dev.js with large PRD...');
try {
const largeResult = execSync(`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${largePRDPath} --tasks=25`, {
stdio: 'inherit'
});
console.log('Large PRD test completed successfully');
} catch (error) {
console.error('Large PRD test failed:', error.message);
}
console.log('\nAll tests completed!');
} catch (error) {
console.error('Test failed:', error);
} finally {
// Clean up test files
console.log('\nCleaning up test files...');
const testFiles = [
path.join(__dirname, 'test-small-prd.txt'),
path.join(__dirname, 'test-medium-prd.txt'),
path.join(__dirname, 'test-large-prd.txt')
];
testFiles.forEach(file => {
if (fs.existsSync(file)) {
fs.unlinkSync(file);
console.log(`Deleted ${file}`);
}
});
console.log('Cleanup complete.');
}
console.log('Starting tests for callClaude function improvements...');
try {
// Instead of importing the callClaude function directly, we'll use the dev.js script
// with our test PRDs by running it as a child process
// Test 1: Small PRD, 5 tasks
console.log('\n=== Test 1: Small PRD, 5 tasks ===');
const smallPRD = createTestPRD('small', 'simple');
const smallPRDPath = path.join(__dirname, 'test-small-prd.txt');
fs.writeFileSync(smallPRDPath, smallPRD, 'utf8');
console.log(`Created test PRD at ${smallPRDPath}`);
console.log('Running dev.js with small PRD...');
// Use the child_process module to run the dev.js script
const { execSync } = await import('child_process');
try {
const smallResult = execSync(
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${smallPRDPath} --tasks=5`,
{
stdio: 'inherit'
}
);
console.log('Small PRD test completed successfully');
} catch (error) {
console.error('Small PRD test failed:', error.message);
}
// Test 2: Medium PRD, 15 tasks
console.log('\n=== Test 2: Medium PRD, 15 tasks ===');
const mediumPRD = createTestPRD('medium', 'simple');
const mediumPRDPath = path.join(__dirname, 'test-medium-prd.txt');
fs.writeFileSync(mediumPRDPath, mediumPRD, 'utf8');
console.log(`Created test PRD at ${mediumPRDPath}`);
console.log('Running dev.js with medium PRD...');
try {
const mediumResult = execSync(
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${mediumPRDPath} --tasks=15`,
{
stdio: 'inherit'
}
);
console.log('Medium PRD test completed successfully');
} catch (error) {
console.error('Medium PRD test failed:', error.message);
}
// Test 3: Large PRD, 25 tasks
console.log('\n=== Test 3: Large PRD, 25 tasks ===');
const largePRD = createTestPRD('large', 'complex');
const largePRDPath = path.join(__dirname, 'test-large-prd.txt');
fs.writeFileSync(largePRDPath, largePRD, 'utf8');
console.log(`Created test PRD at ${largePRDPath}`);
console.log('Running dev.js with large PRD...');
try {
const largeResult = execSync(
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${largePRDPath} --tasks=25`,
{
stdio: 'inherit'
}
);
console.log('Large PRD test completed successfully');
} catch (error) {
console.error('Large PRD test failed:', error.message);
}
console.log('\nAll tests completed!');
} catch (error) {
console.error('Test failed:', error);
} finally {
// Clean up test files
console.log('\nCleaning up test files...');
const testFiles = [
path.join(__dirname, 'test-small-prd.txt'),
path.join(__dirname, 'test-medium-prd.txt'),
path.join(__dirname, 'test-large-prd.txt')
];
testFiles.forEach((file) => {
if (fs.existsSync(file)) {
fs.unlinkSync(file);
console.log(`Deleted ${file}`);
}
});
console.log('Cleanup complete.');
}
}
// Run the tests
runTests().catch(error => {
console.error('Error running tests:', error);
process.exit(1);
});
runTests().catch((error) => {
console.error('Error running tests:', error);
process.exit(1);
});

102
tasks/task_040.txt Normal file
View File

@@ -0,0 +1,102 @@
# Task ID: 40
# Title: Implement Project Funding Documentation and Support Infrastructure
# Status: in-progress
# Dependencies: None
# Priority: medium
# Description: Create FUNDING.yml for GitHub Sponsors integration that outlines all financial support options for the Task Master project.
# Details:
This task involves creating a FUNDING.yml file to enable and manage funding options for the Task Master project:
**FUNDING.yml file**:
- Create a .github/FUNDING.yml file following GitHub's specifications
- Include configuration for multiple funding platforms:
- GitHub Sponsors (primary if available)
- Open Collective
- Patreon
- Ko-fi
- Liberapay
- Custom funding URLs (project website donation page)
- Research and reference successful implementation patterns from Vue.js, React, and TypeScript projects
- Ensure the FUNDING.yml contains sufficient information to guide users on how to support the project
- Include comments within the YAML file to provide context for each funding option
The implementation should maintain consistent branding and messaging with the rest of the Task Master project. Research at least 5 successful open source projects to identify best practices in funding configuration.
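A minimal sketch of what the file could look like, assuming placeholder account names and URLs (the project's real handles are not specified here):

```yaml
# .github/FUNDING.yml: platforms surfaced by GitHub's Sponsor button.
# All account names and URLs below are placeholders for illustration only.
github: [placeholder-maintainer] # GitHub Sponsors (primary)
open_collective: placeholder-collective
patreon: placeholder-patreon
ko_fi: placeholder-kofi
liberapay: placeholder-liberapay
custom: ['https://example.com/donate'] # project website donation page
```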
# Test Strategy:
Testing should verify the technical implementation of the FUNDING.yml file:
1. **FUNDING.yml validation**:
- Verify the file is correctly placed in the .github directory
- Validate YAML syntax using a linter
- Test that GitHub correctly displays funding options on the repository page
- Verify all links to external funding platforms are functional
2. **User experience testing**:
- Test the complete funding workflow from a potential supporter's perspective
- Verify the process is intuitive and barriers to contribution are minimized
- Check that the Sponsor button appears correctly on GitHub
- Ensure all funding platform links resolve to the correct destinations
- Gather feedback from 2-3 potential users on clarity and ease of use
# Subtasks:
## 1. Research and Create FUNDING.yml File [done]
### Dependencies: None
### Description: Research successful funding configurations and create the .github/FUNDING.yml file for GitHub Sponsors integration and other funding platforms.
### Details:
Implementation steps:
1. Create the .github directory at the project root if it doesn't exist
2. Research funding configurations from 5 successful open source projects (Vue.js, React, TypeScript, etc.)
3. Document the patterns and approaches used in these projects
4. Create the FUNDING.yml file with the following platforms:
- GitHub Sponsors (primary)
- Open Collective
- Patreon
- Ko-fi
- Liberapay
- Custom donation URL for the project website
5. Validate the YAML syntax using a linter
6. Test the file by pushing to a test branch and verifying the Sponsor button appears correctly on GitHub
Testing approach:
- Validate YAML syntax using yamllint or similar tool
- Test on GitHub by checking if the Sponsor button appears in the repository
- Verify each funding link resolves to the correct destination
## 4. Add Documentation Comments to FUNDING.yml [pending]
### Dependencies: 40.1
### Description: Add comprehensive comments within the FUNDING.yml file to provide context and guidance for each funding option.
### Details:
Implementation steps:
1. Add a header comment explaining the purpose of the file
2. For each funding platform entry, add comments that explain:
- What the platform is
- How funds are processed on this platform
- Any specific benefits of using this platform
- Brief instructions for potential sponsors
3. Include a comment about how sponsors will be acknowledged
4. Add information about fund allocation (maintenance, new features, infrastructure)
5. Ensure comments follow YAML comment syntax and don't break the file structure
Testing approach:
- Validate that the YAML file still passes linting with comments added
- Verify the file still functions correctly on GitHub
- Have at least one team member review the comments for clarity and completeness
## 5. Integrate Funding Information in Project README [pending]
### Dependencies: 40.1, 40.4
### Description: Add a section to the project README that highlights the funding options and directs users to the Sponsor button.
### Details:
Implementation steps:
1. Create a 'Support the Project' or 'Sponsorship' section in the README.md
2. Explain briefly why financial support matters for the project
3. Direct users to the GitHub Sponsor button
4. Mention the alternative funding platforms available
5. Include a brief note on how funds will be used
6. Add any relevant funding badges (e.g., Open Collective, GitHub Sponsors)
Testing approach:
- Review the README section for clarity and conciseness
- Verify all links work correctly
- Ensure the section is appropriately visible but doesn't overshadow project information
- Check that badges render correctly

89
tasks/task_041.txt Normal file
View File

@@ -0,0 +1,89 @@
# Task ID: 41
# Title: Implement GitHub Actions CI Workflow for Task Master
# Status: pending
# Dependencies: None
# Priority: high
# Description: Create a streamlined CI workflow file (ci.yml) that efficiently tests the Task Master codebase using GitHub Actions.
# Details:
Create a GitHub Actions workflow file at `.github/workflows/ci.yml` with the following specifications (a condensed YAML sketch follows the list):
1. Configure the workflow to trigger on:
- Push events to any branch
- Pull request events targeting any branch
2. Core workflow configuration:
- Use Ubuntu latest as the primary testing environment
- Use Node.js 20.x (LTS) for consistency with the project
- Focus on a single environment for speed and simplicity
3. Configure workflow steps to:
- Checkout the repository using actions/checkout@v4
- Set up Node.js using actions/setup-node@v4 with npm caching
- Install dependencies with 'npm ci'
- Run tests with 'npm run test:coverage'
4. Implement efficient caching:
- Cache node_modules using actions/cache@v4
- Use package-lock.json hash for cache key
- Implement proper cache restoration keys
5. Ensure proper timeouts:
- 2 minutes for dependency installation
- Appropriate timeout for test execution
6. Artifact handling:
- Upload test results and coverage reports
- Use consistent naming for artifacts
- Retain artifacts for 30 days
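Condensed into YAML, the specification above could look roughly like the sketch below. It is illustrative rather than prescriptive: it uses setup-node's built-in npm caching in place of the explicit actions/cache@v4 step described above, and the step names and artifact paths are assumptions.

```yaml
name: CI
on:
  push: # any branch
  pull_request: # any target branch

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm' # npm cache keyed on package-lock.json
      - name: Install dependencies
        run: npm ci
        timeout-minutes: 2
      - name: Run tests with coverage
        run: npm run test:coverage
      - name: Upload test results and coverage
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-results
          path: coverage
          retention-days: 30
```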
# Test Strategy:
To verify correct implementation of the GitHub Actions CI workflow:
1. Manual verification:
- Check that the file is correctly placed at `.github/workflows/ci.yml`
- Verify the YAML syntax is valid
- Confirm all required configurations are present
2. Functional testing:
- Push a commit to verify the workflow triggers
- Create a PR to verify the workflow runs on pull requests
- Verify test coverage reports are generated and uploaded
- Confirm caching is working effectively
3. Performance testing:
- Verify cache hits reduce installation time
- Confirm workflow completes within expected timeframe
- Check artifact upload and download speeds
# Subtasks:
## 1. Create Basic GitHub Actions Workflow [pending]
### Dependencies: None
### Description: Set up the foundational GitHub Actions workflow file with proper triggers and Node.js setup
### Details:
1. Create `.github/workflows/ci.yml`
2. Configure workflow name and triggers
3. Set up Ubuntu runner and Node.js 20.x
4. Implement checkout and Node.js setup actions
5. Configure npm caching
6. Test basic workflow functionality
## 2. Implement Test and Coverage Steps [pending]
### Dependencies: 41.1
### Description: Add test execution and coverage reporting to the workflow
### Details:
1. Add dependency installation with proper timeout
2. Configure test execution with coverage
3. Set up test results and coverage artifacts
4. Verify artifact upload functionality
5. Test the complete workflow
## 3. Optimize Workflow Performance [pending]
### Dependencies: 41.1, 41.2
### Description: Implement caching and performance optimizations
### Details:
1. Set up node_modules caching
2. Configure cache key strategy
3. Implement proper timeout values
4. Test caching effectiveness
5. Document performance improvements

View File

@@ -2208,6 +2208,94 @@
"parentTaskId": 39
}
]
},
{
"id": 40,
"title": "Implement Project Funding Documentation and Support Infrastructure",
"description": "Create FUNDING.yml for GitHub Sponsors integration that outlines all financial support options for the Task Master project.",
"status": "in-progress",
"dependencies": [],
"priority": "medium",
"details": "This task involves creating a FUNDING.yml file to enable and manage funding options for the Task Master project:\n\n**FUNDING.yml file**:\n - Create a .github/FUNDING.yml file following GitHub's specifications\n - Include configuration for multiple funding platforms:\n - GitHub Sponsors (primary if available)\n - Open Collective\n - Patreon\n - Ko-fi\n - Liberapay\n - Custom funding URLs (project website donation page)\n - Research and reference successful implementation patterns from Vue.js, React, and TypeScript projects\n - Ensure the FUNDING.yml contains sufficient information to guide users on how to support the project\n - Include comments within the YAML file to provide context for each funding option\n\nThe implementation should maintain consistent branding and messaging with the rest of the Task Master project. Research at least 5 successful open source projects to identify best practices in funding configuration.",
"testStrategy": "Testing should verify the technical implementation of the FUNDING.yml file:\n\n1. **FUNDING.yml validation**:\n - Verify the file is correctly placed in the .github directory\n - Validate YAML syntax using a linter\n - Test that GitHub correctly displays funding options on the repository page\n - Verify all links to external funding platforms are functional\n\n2. **User experience testing**:\n - Test the complete funding workflow from a potential supporter's perspective\n - Verify the process is intuitive and barriers to contribution are minimized\n - Check that the Sponsor button appears correctly on GitHub\n - Ensure all funding platform links resolve to the correct destinations\n - Gather feedback from 2-3 potential users on clarity and ease of use",
"subtasks": [
{
"id": 1,
"title": "Research and Create FUNDING.yml File",
"description": "Research successful funding configurations and create the .github/FUNDING.yml file for GitHub Sponsors integration and other funding platforms.",
"dependencies": [],
"details": "Implementation steps:\n1. Create the .github directory at the project root if it doesn't exist\n2. Research funding configurations from 5 successful open source projects (Vue.js, React, TypeScript, etc.)\n3. Document the patterns and approaches used in these projects\n4. Create the FUNDING.yml file with the following platforms:\n - GitHub Sponsors (primary)\n - Open Collective\n - Patreon\n - Ko-fi\n - Liberapay\n - Custom donation URL for the project website\n5. Validate the YAML syntax using a linter\n6. Test the file by pushing to a test branch and verifying the Sponsor button appears correctly on GitHub\n\nTesting approach:\n- Validate YAML syntax using yamllint or similar tool\n- Test on GitHub by checking if the Sponsor button appears in the repository\n- Verify each funding link resolves to the correct destination",
"status": "done",
"parentTaskId": 40
},
{
"id": 4,
"title": "Add Documentation Comments to FUNDING.yml",
"description": "Add comprehensive comments within the FUNDING.yml file to provide context and guidance for each funding option.",
"dependencies": [
1
],
"details": "Implementation steps:\n1. Add a header comment explaining the purpose of the file\n2. For each funding platform entry, add comments that explain:\n - What the platform is\n - How funds are processed on this platform\n - Any specific benefits of using this platform\n - Brief instructions for potential sponsors\n3. Include a comment about how sponsors will be acknowledged\n4. Add information about fund allocation (maintenance, new features, infrastructure)\n5. Ensure comments follow YAML comment syntax and don't break the file structure\n\nTesting approach:\n- Validate that the YAML file still passes linting with comments added\n- Verify the file still functions correctly on GitHub\n- Have at least one team member review the comments for clarity and completeness",
"status": "pending",
"parentTaskId": 40
},
{
"id": 5,
"title": "Integrate Funding Information in Project README",
"description": "Add a section to the project README that highlights the funding options and directs users to the Sponsor button.",
"dependencies": [
1,
4
],
"details": "Implementation steps:\n1. Create a 'Support the Project' or 'Sponsorship' section in the README.md\n2. Explain briefly why financial support matters for the project\n3. Direct users to the GitHub Sponsor button\n4. Mention the alternative funding platforms available\n5. Include a brief note on how funds will be used\n6. Add any relevant funding badges (e.g., Open Collective, GitHub Sponsors)\n\nTesting approach:\n- Review the README section for clarity and conciseness\n- Verify all links work correctly\n- Ensure the section is appropriately visible but doesn't overshadow project information\n- Check that badges render correctly",
"status": "pending",
"parentTaskId": 40
}
]
},
{
"id": 41,
"title": "Implement GitHub Actions CI Workflow for Cross-Platform Testing",
"description": "Create a CI workflow file (ci.yml) that tests the codebase across multiple Node.js versions and operating systems using GitHub Actions.",
"status": "pending",
"dependencies": [],
"priority": "high",
"details": "Create a GitHub Actions workflow file at `.github/workflows/ci.yml` with the following specifications:\n\n1. Configure the workflow to trigger on:\n - Push events to any branch\n - Pull request events targeting any branch\n\n2. Implement a matrix strategy that tests across:\n - Node.js versions: 18.x, 20.x, and 22.x\n - Operating systems: Ubuntu-latest and Windows-latest\n\n3. Include proper Git configuration steps:\n - Set Git user name to 'GitHub Actions'\n - Set Git email to 'github-actions@github.com'\n\n4. Configure workflow steps to:\n - Checkout the repository using actions/checkout@v3\n - Set up Node.js using actions/setup-node@v3 with the matrix version\n - Use npm for package management (not pnpm)\n - Install dependencies with 'npm ci'\n - Run linting with 'npm run lint' (if available)\n - Run tests with 'npm test'\n - Run build process with 'npm run build'\n\n5. Implement concurrency controls to:\n - Cancel in-progress workflows when new commits are pushed to the same PR\n - Use a concurrency group based on the GitHub ref and workflow name\n\n6. Add proper caching for npm dependencies to speed up workflow runs\n\n7. Ensure the workflow includes appropriate timeouts to prevent hung jobs",
"testStrategy": "To verify correct implementation of the GitHub Actions CI workflow:\n\n1. Manual verification:\n - Check that the file is correctly placed at `.github/workflows/ci.yml`\n - Verify the YAML syntax is valid using a YAML linter\n - Confirm all required configurations (triggers, matrix, steps) are present\n\n2. Functional testing:\n - Push a commit to a feature branch to confirm the workflow triggers\n - Create a PR to verify the workflow runs on pull requests\n - Verify the workflow successfully runs on both Ubuntu and Windows\n - Confirm tests run against all three Node.js versions (18, 20, 22)\n - Test concurrency by pushing multiple commits to the same PR rapidly\n\n3. Edge case testing:\n - Introduce a failing test and verify the workflow reports failure\n - Test with a large dependency tree to verify caching works correctly\n - Verify the workflow handles non-ASCII characters in file paths correctly (particularly on Windows)\n\n4. Check workflow logs to ensure:\n - Git configuration is applied correctly\n - Dependencies are installed with npm (not pnpm)\n - All matrix combinations run independently\n - Concurrency controls cancel redundant workflow runs",
"subtasks": [
{
"id": 1,
"title": "Create Basic GitHub Actions Workflow Structure",
"description": "Set up the foundational GitHub Actions workflow file with triggers, checkout, and Node.js setup using matrix strategy",
"dependencies": [],
"details": "1. Create `.github/workflows/` directory if it doesn't exist\n2. Create a new file `ci.yml` inside this directory\n3. Define the workflow name at the top of the file\n4. Configure triggers for push events to any branch and pull request events targeting any branch\n5. Set up the matrix strategy for Node.js versions (18.x, 20.x, 22.x) and operating systems (Ubuntu-latest, Windows-latest)\n6. Configure the job to checkout the repository using actions/checkout@v3\n7. Set up Node.js using actions/setup-node@v3 with the matrix version\n8. Add proper caching for npm dependencies\n9. Test the workflow by pushing the file to a test branch and verifying it triggers correctly\n10. Verify that the matrix builds are running on all specified Node versions and operating systems",
"status": "pending",
"parentTaskId": 41
},
{
"id": 2,
"title": "Implement Build and Test Steps with Git Configuration",
"description": "Add the core build and test steps to the workflow, including Git configuration, dependency installation, and execution of lint, test, and build commands",
"dependencies": [
1
],
"details": "1. Add Git configuration steps to set user name to 'GitHub Actions' and email to 'github-actions@github.com'\n2. Add step to install dependencies with 'npm ci'\n3. Add conditional step to run linting with 'npm run lint' if available\n4. Add step to run tests with 'npm test'\n5. Add step to run build process with 'npm run build'\n6. Ensure each step has appropriate names for clear visibility in GitHub Actions UI\n7. Add appropriate error handling and continue-on-error settings where needed\n8. Test the workflow by pushing a change and verifying all build steps execute correctly\n9. Verify that the workflow correctly runs on both Ubuntu and Windows environments\n10. Ensure that all commands use the correct syntax for cross-platform compatibility",
"status": "pending",
"parentTaskId": 41
},
{
"id": 3,
"title": "Add Workflow Optimization Features",
"description": "Implement concurrency controls, timeouts, and other optimization features to improve workflow efficiency and reliability",
"dependencies": [
1,
2
],
"details": "1. Implement concurrency controls to cancel in-progress workflows when new commits are pushed to the same PR\n2. Define a concurrency group based on the GitHub ref and workflow name\n3. Add appropriate timeouts to prevent hung jobs (typically 30-60 minutes depending on project complexity)\n4. Add status badges to the README.md file to show build status\n5. Optimize the workflow by adding appropriate 'if' conditions to skip unnecessary steps\n6. Add job summary outputs to provide clear information about the build results\n7. Test the concurrency feature by pushing multiple commits in quick succession to a PR\n8. Verify that old workflow runs are canceled when new commits are pushed\n9. Test timeout functionality by temporarily adding a long-running step\n10. Document the CI workflow in project documentation, explaining what it does and how to troubleshoot common issues",
"status": "pending",
"parentTaskId": 41
}
]
}
]
}
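The cross-platform variant of task 41 above also calls for concurrency controls keyed on the ref and workflow name so that superseded runs are cancelled. A minimal sketch of such a top-level block, assuming no other concurrency groups are in use:

```yaml
# Cancel in-progress runs when new commits are pushed to the same ref
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
```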

View File

@@ -1,4 +1,8 @@
import { checkForUpdate, displayUpgradeNotification, compareVersions } from './scripts/modules/commands.js';
import {
checkForUpdate,
displayUpgradeNotification,
compareVersions
} from './scripts/modules/commands.js';
import fs from 'fs';
import path from 'path';
@@ -7,63 +11,73 @@ process.env.FORCE_VERSION = '0.9.30';
// Create a mock package.json in memory for testing
const mockPackageJson = {
name: 'task-master-ai',
version: '0.9.30'
name: 'task-master-ai',
version: '0.9.30'
};
// Modified version of checkForUpdate that doesn't use HTTP for testing
async function testCheckForUpdate(simulatedLatestVersion) {
// Get current version - use our forced version
const currentVersion = process.env.FORCE_VERSION || '0.9.30';
console.log(`Using simulated current version: ${currentVersion}`);
console.log(`Using simulated latest version: ${simulatedLatestVersion}`);
// Compare versions
const needsUpdate = compareVersions(currentVersion, simulatedLatestVersion) < 0;
return {
currentVersion,
latestVersion: simulatedLatestVersion,
needsUpdate
};
// Get current version - use our forced version
const currentVersion = process.env.FORCE_VERSION || '0.9.30';
console.log(`Using simulated current version: ${currentVersion}`);
console.log(`Using simulated latest version: ${simulatedLatestVersion}`);
// Compare versions
const needsUpdate =
compareVersions(currentVersion, simulatedLatestVersion) < 0;
return {
currentVersion,
latestVersion: simulatedLatestVersion,
needsUpdate
};
}
// Test with current version older than latest (should show update notice)
async function runTest() {
console.log('=== Testing version check scenarios ===\n');
// Scenario 1: Update available
console.log('\n--- Scenario 1: Update available (Current: 0.9.30, Latest: 1.0.0) ---');
const updateInfo1 = await testCheckForUpdate('1.0.0');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo1.currentVersion}`);
console.log(`- Latest version: ${updateInfo1.latestVersion}`);
console.log(`- Update needed: ${updateInfo1.needsUpdate}`);
if (updateInfo1.needsUpdate) {
console.log('\nDisplaying upgrade notification:');
displayUpgradeNotification(updateInfo1.currentVersion, updateInfo1.latestVersion);
}
// Scenario 2: No update needed (versions equal)
console.log('\n--- Scenario 2: No update needed (Current: 0.9.30, Latest: 0.9.30) ---');
const updateInfo2 = await testCheckForUpdate('0.9.30');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo2.currentVersion}`);
console.log(`- Latest version: ${updateInfo2.latestVersion}`);
console.log(`- Update needed: ${updateInfo2.needsUpdate}`);
// Scenario 3: Development version (current newer than latest)
console.log('\n--- Scenario 3: Development version (Current: 0.9.30, Latest: 0.9.0) ---');
const updateInfo3 = await testCheckForUpdate('0.9.0');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo3.currentVersion}`);
console.log(`- Latest version: ${updateInfo3.latestVersion}`);
console.log(`- Update needed: ${updateInfo3.needsUpdate}`);
console.log('\n=== Test complete ===');
console.log('=== Testing version check scenarios ===\n');
// Scenario 1: Update available
console.log(
'\n--- Scenario 1: Update available (Current: 0.9.30, Latest: 1.0.0) ---'
);
const updateInfo1 = await testCheckForUpdate('1.0.0');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo1.currentVersion}`);
console.log(`- Latest version: ${updateInfo1.latestVersion}`);
console.log(`- Update needed: ${updateInfo1.needsUpdate}`);
if (updateInfo1.needsUpdate) {
console.log('\nDisplaying upgrade notification:');
displayUpgradeNotification(
updateInfo1.currentVersion,
updateInfo1.latestVersion
);
}
// Scenario 2: No update needed (versions equal)
console.log(
'\n--- Scenario 2: No update needed (Current: 0.9.30, Latest: 0.9.30) ---'
);
const updateInfo2 = await testCheckForUpdate('0.9.30');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo2.currentVersion}`);
console.log(`- Latest version: ${updateInfo2.latestVersion}`);
console.log(`- Update needed: ${updateInfo2.needsUpdate}`);
// Scenario 3: Development version (current newer than latest)
console.log(
'\n--- Scenario 3: Development version (Current: 0.9.30, Latest: 0.9.0) ---'
);
const updateInfo3 = await testCheckForUpdate('0.9.0');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo3.currentVersion}`);
console.log(`- Latest version: ${updateInfo3.latestVersion}`);
console.log(`- Update needed: ${updateInfo3.needsUpdate}`);
console.log('\n=== Test complete ===');
}
// Run all tests
runTest();
runTest();

View File

@@ -1,4 +1,7 @@
import { displayUpgradeNotification, compareVersions } from './scripts/modules/commands.js';
import {
displayUpgradeNotification,
compareVersions
} from './scripts/modules/commands.js';
// Simulate different version scenarios
console.log('=== Simulating version check ===\n');
@@ -8,15 +11,25 @@ console.log('Scenario 1: Current version older than latest');
displayUpgradeNotification('0.9.30', '1.0.0');
// 2. Current version same as latest (no update needed)
console.log('\nScenario 2: Current version same as latest (this would not normally show a notice)');
console.log(
'\nScenario 2: Current version same as latest (this would not normally show a notice)'
);
console.log('Current: 1.0.0, Latest: 1.0.0');
console.log('compareVersions result:', compareVersions('1.0.0', '1.0.0'));
console.log('Update needed:', compareVersions('1.0.0', '1.0.0') < 0 ? 'Yes' : 'No');
console.log(
'Update needed:',
compareVersions('1.0.0', '1.0.0') < 0 ? 'Yes' : 'No'
);
// 3. Current version newer than latest (e.g., development version, would not show notice)
console.log('\nScenario 3: Current version newer than latest (this would not normally show a notice)');
console.log(
'\nScenario 3: Current version newer than latest (this would not normally show a notice)'
);
console.log('Current: 1.1.0, Latest: 1.0.0');
console.log('compareVersions result:', compareVersions('1.1.0', '1.0.0'));
console.log('Update needed:', compareVersions('1.1.0', '1.0.0') < 0 ? 'Yes' : 'No');
console.log(
'Update needed:',
compareVersions('1.1.0', '1.0.0') < 0 ? 'Yes' : 'No'
);
console.log('\n=== Test complete ===');
console.log('\n=== Test complete ===');

View File

@@ -60,4 +60,4 @@ We aim for at least 80% test coverage for all code paths. Coverage reports can b
```bash
npm run test:coverage
```
```

View File

@@ -3,42 +3,50 @@
*/
export const sampleClaudeResponse = {
tasks: [
{
id: 1,
title: "Setup Task Data Structure",
description: "Implement the core task data structure and file operations",
status: "pending",
dependencies: [],
priority: "high",
details: "Create the tasks.json file structure with support for task properties including ID, title, description, status, dependencies, priority, details, and test strategy. Implement file system operations for reading and writing task data.",
testStrategy: "Verify tasks.json is created with the correct structure and that task data can be read from and written to the file."
},
{
id: 2,
title: "Implement CLI Foundation",
description: "Create the command-line interface foundation with basic commands",
status: "pending",
dependencies: [1],
priority: "high",
details: "Set up Commander.js for handling CLI commands. Implement the basic command structure including help documentation. Create the foundational command parsing logic.",
testStrategy: "Test each command to ensure it properly parses arguments and options. Verify help documentation is displayed correctly."
},
{
id: 3,
title: "Develop Task Management Operations",
description: "Implement core operations for creating, reading, updating, and deleting tasks",
status: "pending",
dependencies: [1],
priority: "medium",
details: "Implement functions for listing tasks, adding new tasks, updating task status, and removing tasks. Include support for filtering tasks by status and other properties.",
testStrategy: "Create unit tests for each CRUD operation to verify they correctly modify the task data."
}
],
metadata: {
projectName: "Task Management CLI",
totalTasks: 3,
sourceFile: "tests/fixtures/sample-prd.txt",
generatedAt: "2023-12-15"
}
};
tasks: [
{
id: 1,
title: 'Setup Task Data Structure',
description: 'Implement the core task data structure and file operations',
status: 'pending',
dependencies: [],
priority: 'high',
details:
'Create the tasks.json file structure with support for task properties including ID, title, description, status, dependencies, priority, details, and test strategy. Implement file system operations for reading and writing task data.',
testStrategy:
'Verify tasks.json is created with the correct structure and that task data can be read from and written to the file.'
},
{
id: 2,
title: 'Implement CLI Foundation',
description:
'Create the command-line interface foundation with basic commands',
status: 'pending',
dependencies: [1],
priority: 'high',
details:
'Set up Commander.js for handling CLI commands. Implement the basic command structure including help documentation. Create the foundational command parsing logic.',
testStrategy:
'Test each command to ensure it properly parses arguments and options. Verify help documentation is displayed correctly.'
},
{
id: 3,
title: 'Develop Task Management Operations',
description:
'Implement core operations for creating, reading, updating, and deleting tasks',
status: 'pending',
dependencies: [1],
priority: 'medium',
details:
'Implement functions for listing tasks, adding new tasks, updating task status, and removing tasks. Include support for filtering tasks by status and other properties.',
testStrategy:
'Create unit tests for each CRUD operation to verify they correctly modify the task data.'
}
],
metadata: {
projectName: 'Task Management CLI',
totalTasks: 3,
sourceFile: 'tests/fixtures/sample-prd.txt',
generatedAt: '2023-12-15'
}
};

View File

@@ -3,70 +3,72 @@
*/
export const sampleTasks = {
meta: {
projectName: "Test Project",
projectVersion: "1.0.0",
createdAt: "2023-01-01T00:00:00.000Z",
updatedAt: "2023-01-01T00:00:00.000Z"
},
tasks: [
{
id: 1,
title: "Initialize Project",
description: "Set up the project structure and dependencies",
status: "done",
dependencies: [],
priority: "high",
details: "Create directory structure, initialize package.json, and install dependencies",
testStrategy: "Verify all directories and files are created correctly"
},
{
id: 2,
title: "Create Core Functionality",
description: "Implement the main features of the application",
status: "in-progress",
dependencies: [1],
priority: "high",
details: "Implement user authentication, data processing, and API endpoints",
testStrategy: "Write unit tests for all core functions"
},
{
id: 3,
title: "Implement UI Components",
description: "Create the user interface components",
status: "pending",
dependencies: [2],
priority: "medium",
details: "Design and implement React components for the user interface",
testStrategy: "Test components with React Testing Library",
subtasks: [
{
id: 1,
title: "Create Header Component",
description: "Implement the header component",
status: "pending",
dependencies: [],
details: "Create a responsive header with navigation links"
},
{
id: 2,
title: "Create Footer Component",
description: "Implement the footer component",
status: "pending",
dependencies: [],
details: "Create a footer with copyright information and links"
}
]
}
]
meta: {
projectName: 'Test Project',
projectVersion: '1.0.0',
createdAt: '2023-01-01T00:00:00.000Z',
updatedAt: '2023-01-01T00:00:00.000Z'
},
tasks: [
{
id: 1,
title: 'Initialize Project',
description: 'Set up the project structure and dependencies',
status: 'done',
dependencies: [],
priority: 'high',
details:
'Create directory structure, initialize package.json, and install dependencies',
testStrategy: 'Verify all directories and files are created correctly'
},
{
id: 2,
title: 'Create Core Functionality',
description: 'Implement the main features of the application',
status: 'in-progress',
dependencies: [1],
priority: 'high',
details:
'Implement user authentication, data processing, and API endpoints',
testStrategy: 'Write unit tests for all core functions'
},
{
id: 3,
title: 'Implement UI Components',
description: 'Create the user interface components',
status: 'pending',
dependencies: [2],
priority: 'medium',
details: 'Design and implement React components for the user interface',
testStrategy: 'Test components with React Testing Library',
subtasks: [
{
id: 1,
title: 'Create Header Component',
description: 'Implement the header component',
status: 'pending',
dependencies: [],
details: 'Create a responsive header with navigation links'
},
{
id: 2,
title: 'Create Footer Component',
description: 'Implement the footer component',
status: 'pending',
dependencies: [],
details: 'Create a footer with copyright information and links'
}
]
}
]
};
export const emptySampleTasks = {
meta: {
projectName: "Empty Project",
projectVersion: "1.0.0",
createdAt: "2023-01-01T00:00:00.000Z",
updatedAt: "2023-01-01T00:00:00.000Z"
},
tasks: []
};
meta: {
projectName: 'Empty Project',
projectVersion: '1.0.0',
createdAt: '2023-01-01T00:00:00.000Z',
updatedAt: '2023-01-01T00:00:00.000Z'
},
tasks: []
};

View File

@@ -17,10 +17,10 @@ import { listTasksDirect } from '../../../mcp-server/src/core/task-master-core.j
// Mock logger
const mockLogger = {
info: jest.fn(),
error: jest.fn(),
debug: jest.fn(),
warn: jest.fn()
info: jest.fn(),
error: jest.fn(),
debug: jest.fn(),
warn: jest.fn()
};
// Test file paths
@@ -28,164 +28,164 @@ const testProjectRoot = path.join(__dirname, '../../fixture');
const testTasksPath = path.join(testProjectRoot, 'test-tasks.json');
describe('MCP Server Direct Functions', () => {
// Create test data before tests
beforeAll(() => {
// Create test directory if it doesn't exist
if (!fs.existsSync(testProjectRoot)) {
fs.mkdirSync(testProjectRoot, { recursive: true });
}
// Create a sample tasks.json file for testing
const sampleTasks = {
meta: {
projectName: 'Test Project',
version: '1.0.0'
},
tasks: [
{
id: 1,
title: 'Task 1',
description: 'First task',
status: 'done',
dependencies: [],
priority: 'high'
},
{
id: 2,
title: 'Task 2',
description: 'Second task',
status: 'in-progress',
dependencies: [1],
priority: 'medium',
subtasks: [
{
id: 1,
title: 'Subtask 2.1',
description: 'First subtask',
status: 'done'
},
{
id: 2,
title: 'Subtask 2.2',
description: 'Second subtask',
status: 'pending'
}
]
},
{
id: 3,
title: 'Task 3',
description: 'Third task',
status: 'pending',
dependencies: [1, 2],
priority: 'low'
}
]
};
fs.writeFileSync(testTasksPath, JSON.stringify(sampleTasks, null, 2));
});
// Clean up after tests
afterAll(() => {
// Remove test tasks file
if (fs.existsSync(testTasksPath)) {
fs.unlinkSync(testTasksPath);
}
// Try to remove the directory (will only work if empty)
try {
fs.rmdirSync(testProjectRoot);
} catch (error) {
// Ignore errors if the directory isn't empty
}
});
// Reset mocks before each test
beforeEach(() => {
jest.clearAllMocks();
});
describe('listTasksDirect', () => {
test('should return all tasks when no filter is provided', async () => {
// Arrange
const args = {
projectRoot: testProjectRoot,
file: testTasksPath
};
// Act
const result = await listTasksDirect(args, mockLogger);
// Assert
expect(result.success).toBe(true);
expect(result.data.tasks.length).toBe(3);
expect(result.data.stats.total).toBe(3);
expect(result.data.stats.completed).toBe(1);
expect(result.data.stats.inProgress).toBe(1);
expect(result.data.stats.pending).toBe(1);
expect(mockLogger.info).toHaveBeenCalled();
});
test('should filter tasks by status', async () => {
// Arrange
const args = {
projectRoot: testProjectRoot,
file: testTasksPath,
status: 'pending'
};
// Act
const result = await listTasksDirect(args, mockLogger);
// Assert
expect(result.success).toBe(true);
expect(result.data.tasks.length).toBe(1);
expect(result.data.tasks[0].id).toBe(3);
expect(result.data.filter).toBe('pending');
});
test('should include subtasks when requested', async () => {
// Arrange
const args = {
projectRoot: testProjectRoot,
file: testTasksPath,
withSubtasks: true
};
// Act
const result = await listTasksDirect(args, mockLogger);
// Assert
expect(result.success).toBe(true);
// Verify subtasks are included
const taskWithSubtasks = result.data.tasks.find(t => t.id === 2);
expect(taskWithSubtasks.subtasks).toBeDefined();
expect(taskWithSubtasks.subtasks.length).toBe(2);
// Verify subtask details
expect(taskWithSubtasks.subtasks[0].id).toBe(1);
expect(taskWithSubtasks.subtasks[0].title).toBe('Subtask 2.1');
expect(taskWithSubtasks.subtasks[0].status).toBe('done');
});
test('should handle errors gracefully', async () => {
// Arrange
const args = {
projectRoot: testProjectRoot,
file: 'non-existent-file.json'
};
// Act
const result = await listTasksDirect(args, mockLogger);
// Assert
expect(result.success).toBe(false);
expect(result.error).toBeDefined();
expect(result.error.code).toBeDefined();
expect(result.error.message).toBeDefined();
expect(mockLogger.error).toHaveBeenCalled();
});
});
});
// Create test data before tests
beforeAll(() => {
// Create test directory if it doesn't exist
if (!fs.existsSync(testProjectRoot)) {
fs.mkdirSync(testProjectRoot, { recursive: true });
}
// Create a sample tasks.json file for testing
const sampleTasks = {
meta: {
projectName: 'Test Project',
version: '1.0.0'
},
tasks: [
{
id: 1,
title: 'Task 1',
description: 'First task',
status: 'done',
dependencies: [],
priority: 'high'
},
{
id: 2,
title: 'Task 2',
description: 'Second task',
status: 'in-progress',
dependencies: [1],
priority: 'medium',
subtasks: [
{
id: 1,
title: 'Subtask 2.1',
description: 'First subtask',
status: 'done'
},
{
id: 2,
title: 'Subtask 2.2',
description: 'Second subtask',
status: 'pending'
}
]
},
{
id: 3,
title: 'Task 3',
description: 'Third task',
status: 'pending',
dependencies: [1, 2],
priority: 'low'
}
]
};
fs.writeFileSync(testTasksPath, JSON.stringify(sampleTasks, null, 2));
});
// Clean up after tests
afterAll(() => {
// Remove test tasks file
if (fs.existsSync(testTasksPath)) {
fs.unlinkSync(testTasksPath);
}
// Try to remove the directory (will only work if empty)
try {
fs.rmdirSync(testProjectRoot);
} catch (error) {
// Ignore errors if the directory isn't empty
}
});
// Reset mocks before each test
beforeEach(() => {
jest.clearAllMocks();
});
describe('listTasksDirect', () => {
test('should return all tasks when no filter is provided', async () => {
// Arrange
const args = {
projectRoot: testProjectRoot,
file: testTasksPath
};
// Act
const result = await listTasksDirect(args, mockLogger);
// Assert
expect(result.success).toBe(true);
expect(result.data.tasks.length).toBe(3);
expect(result.data.stats.total).toBe(3);
expect(result.data.stats.completed).toBe(1);
expect(result.data.stats.inProgress).toBe(1);
expect(result.data.stats.pending).toBe(1);
expect(mockLogger.info).toHaveBeenCalled();
});
test('should filter tasks by status', async () => {
// Arrange
const args = {
projectRoot: testProjectRoot,
file: testTasksPath,
status: 'pending'
};
// Act
const result = await listTasksDirect(args, mockLogger);
// Assert
expect(result.success).toBe(true);
expect(result.data.tasks.length).toBe(1);
expect(result.data.tasks[0].id).toBe(3);
expect(result.data.filter).toBe('pending');
});
test('should include subtasks when requested', async () => {
// Arrange
const args = {
projectRoot: testProjectRoot,
file: testTasksPath,
withSubtasks: true
};
// Act
const result = await listTasksDirect(args, mockLogger);
// Assert
expect(result.success).toBe(true);
// Verify subtasks are included
const taskWithSubtasks = result.data.tasks.find((t) => t.id === 2);
expect(taskWithSubtasks.subtasks).toBeDefined();
expect(taskWithSubtasks.subtasks.length).toBe(2);
// Verify subtask details
expect(taskWithSubtasks.subtasks[0].id).toBe(1);
expect(taskWithSubtasks.subtasks[0].title).toBe('Subtask 2.1');
expect(taskWithSubtasks.subtasks[0].status).toBe('done');
});
test('should handle errors gracefully', async () => {
// Arrange
const args = {
projectRoot: testProjectRoot,
file: 'non-existent-file.json'
};
// Act
const result = await listTasksDirect(args, mockLogger);
// Assert
expect(result.success).toBe(false);
expect(result.error).toBeDefined();
expect(result.error.code).toBeDefined();
expect(result.error.message).toBeDefined();
expect(mockLogger.error).toHaveBeenCalled();
});
});
});
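Read together, the assertions above imply the following result shape for `listTasksDirect`. This is a sketch inferred from the tests, not the function's documented contract; any field beyond those asserted is not guaranteed, and the placeholder values are illustrative.

```js
// Shape implied by the assertions above (illustrative only).
const successResult = {
	success: true,
	data: {
		tasks: [
			/* task objects, carrying `subtasks` when withSubtasks is true */
		],
		filter: 'pending', // present when a status filter was applied
		stats: { total: 3, completed: 1, inProgress: 1, pending: 1 }
	}
};

const errorResult = {
	success: false,
	error: { code: 'EXAMPLE_CODE', message: 'Human-readable description' } // placeholder values
};
```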

View File

@@ -1,6 +1,6 @@
/**
* Jest setup file
*
*
* This file is run before each test suite to set up the test environment.
*/
@@ -16,15 +16,15 @@ process.env.PROJECT_NAME = 'Test Project';
process.env.PROJECT_VERSION = '1.0.0';
// Add global test helpers if needed
global.wait = (ms) => new Promise(resolve => setTimeout(resolve, ms));
global.wait = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
// If needed, silence console during tests
if (process.env.SILENCE_CONSOLE === 'true') {
global.console = {
...console,
log: jest.fn(),
info: jest.fn(),
warn: jest.fn(),
error: jest.fn(),
};
}
global.console = {
...console,
log: jest.fn(),
info: jest.fn(),
warn: jest.fn(),
error: jest.fn()
};
}
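The `wait` helper registered in this setup file is available to every suite; a hypothetical use looks like the following (the test name and delay are illustrative, not taken from the repository).

```js
// Hypothetical test relying on the global `wait` helper from the setup file
test('settles after a short pause', async () => {
	await global.wait(50); // pause for 50 ms before asserting
	expect(true).toBe(true);
});
```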

View File

@@ -10,62 +10,68 @@ const mockLog = jest.fn();
// Mock dependencies
jest.mock('@anthropic-ai/sdk', () => {
const mockCreate = jest.fn().mockResolvedValue({
content: [{ text: 'AI response' }],
});
const mockAnthropicInstance = {
messages: {
create: mockCreate
}
};
const mockAnthropicConstructor = jest.fn().mockImplementation(() => mockAnthropicInstance);
return {
Anthropic: mockAnthropicConstructor
};
const mockCreate = jest.fn().mockResolvedValue({
content: [{ text: 'AI response' }]
});
const mockAnthropicInstance = {
messages: {
create: mockCreate
}
};
const mockAnthropicConstructor = jest
.fn()
.mockImplementation(() => mockAnthropicInstance);
return {
Anthropic: mockAnthropicConstructor
};
});
// Use jest.fn() directly for OpenAI mock
const mockOpenAIInstance = {
chat: {
completions: {
create: jest.fn().mockResolvedValue({
choices: [{ message: { content: 'Perplexity response' } }],
}),
},
},
chat: {
completions: {
create: jest.fn().mockResolvedValue({
choices: [{ message: { content: 'Perplexity response' } }]
})
}
}
};
const mockOpenAI = jest.fn().mockImplementation(() => mockOpenAIInstance);
jest.mock('openai', () => {
return { default: mockOpenAI };
return { default: mockOpenAI };
});
jest.mock('dotenv', () => ({
config: jest.fn(),
config: jest.fn()
}));
jest.mock('../../scripts/modules/utils.js', () => ({
CONFIG: {
model: 'claude-3-sonnet-20240229',
temperature: 0.7,
maxTokens: 4000,
},
log: mockLog,
sanitizePrompt: jest.fn(text => text),
CONFIG: {
model: 'claude-3-sonnet-20240229',
temperature: 0.7,
maxTokens: 4000
},
log: mockLog,
sanitizePrompt: jest.fn((text) => text)
}));
jest.mock('../../scripts/modules/ui.js', () => ({
startLoadingIndicator: jest.fn().mockReturnValue('mockLoader'),
stopLoadingIndicator: jest.fn(),
startLoadingIndicator: jest.fn().mockReturnValue('mockLoader'),
stopLoadingIndicator: jest.fn()
}));
// Mock anthropic global object
global.anthropic = {
messages: {
create: jest.fn().mockResolvedValue({
content: [{ text: '[{"id": 1, "title": "Test", "description": "Test", "dependencies": [], "details": "Test"}]' }],
}),
},
messages: {
create: jest.fn().mockResolvedValue({
content: [
{
text: '[{"id": 1, "title": "Test", "description": "Test", "dependencies": [], "details": "Test"}]'
}
]
})
}
};
// Mock process.env
@@ -75,20 +81,20 @@ const originalEnv = process.env;
import { Anthropic } from '@anthropic-ai/sdk';
describe('AI Services Module', () => {
beforeEach(() => {
jest.clearAllMocks();
process.env = { ...originalEnv };
process.env.ANTHROPIC_API_KEY = 'test-anthropic-key';
process.env.PERPLEXITY_API_KEY = 'test-perplexity-key';
});
beforeEach(() => {
jest.clearAllMocks();
process.env = { ...originalEnv };
process.env.ANTHROPIC_API_KEY = 'test-anthropic-key';
process.env.PERPLEXITY_API_KEY = 'test-perplexity-key';
});
afterEach(() => {
process.env = originalEnv;
});
afterEach(() => {
process.env = originalEnv;
});
describe('parseSubtasksFromText function', () => {
test('should parse subtasks from JSON text', () => {
const text = `Here's your list of subtasks:
describe('parseSubtasksFromText function', () => {
test('should parse subtasks from JSON text', () => {
const text = `Here's your list of subtasks:
[
{
@@ -109,31 +115,31 @@ describe('AI Services Module', () => {
These subtasks will help you implement the parent task efficiently.`;
const result = parseSubtasksFromText(text, 1, 2, 5);
expect(result).toHaveLength(2);
expect(result[0]).toEqual({
id: 1,
title: 'Implement database schema',
description: 'Design and implement the database schema for user data',
status: 'pending',
dependencies: [],
details: 'Create tables for users, preferences, and settings',
parentTaskId: 5
});
expect(result[1]).toEqual({
id: 2,
title: 'Create API endpoints',
description: 'Develop RESTful API endpoints for user operations',
status: 'pending',
dependencies: [],
details: 'Implement CRUD operations for user management',
parentTaskId: 5
});
});
const result = parseSubtasksFromText(text, 1, 2, 5);
test('should handle subtasks with dependencies', () => {
const text = `
expect(result).toHaveLength(2);
expect(result[0]).toEqual({
id: 1,
title: 'Implement database schema',
description: 'Design and implement the database schema for user data',
status: 'pending',
dependencies: [],
details: 'Create tables for users, preferences, and settings',
parentTaskId: 5
});
expect(result[1]).toEqual({
id: 2,
title: 'Create API endpoints',
description: 'Develop RESTful API endpoints for user operations',
status: 'pending',
dependencies: [],
details: 'Implement CRUD operations for user management',
parentTaskId: 5
});
});
test('should handle subtasks with dependencies', () => {
const text = `
[
{
"id": 1,
@@ -151,15 +157,15 @@ These subtasks will help you implement the parent task efficiently.`;
}
]`;
const result = parseSubtasksFromText(text, 1, 2, 5);
expect(result).toHaveLength(2);
expect(result[0].dependencies).toEqual([]);
expect(result[1].dependencies).toEqual([1]);
});
const result = parseSubtasksFromText(text, 1, 2, 5);
test('should handle complex dependency lists', () => {
const text = `
expect(result).toHaveLength(2);
expect(result[0].dependencies).toEqual([]);
expect(result[1].dependencies).toEqual([1]);
});
test('should handle complex dependency lists', () => {
const text = `
[
{
"id": 1,
@@ -184,39 +190,39 @@ These subtasks will help you implement the parent task efficiently.`;
}
]`;
const result = parseSubtasksFromText(text, 1, 3, 5);
expect(result).toHaveLength(3);
expect(result[2].dependencies).toEqual([1, 2]);
});
const result = parseSubtasksFromText(text, 1, 3, 5);
test('should create fallback subtasks for empty text', () => {
const emptyText = '';
const result = parseSubtasksFromText(emptyText, 1, 2, 5);
// Verify fallback subtasks structure
expect(result).toHaveLength(2);
expect(result[0]).toMatchObject({
id: 1,
title: 'Subtask 1',
description: 'Auto-generated fallback subtask',
status: 'pending',
dependencies: [],
parentTaskId: 5
});
expect(result[1]).toMatchObject({
id: 2,
title: 'Subtask 2',
description: 'Auto-generated fallback subtask',
status: 'pending',
dependencies: [],
parentTaskId: 5
});
});
expect(result).toHaveLength(3);
expect(result[2].dependencies).toEqual([1, 2]);
});
test('should normalize subtask IDs', () => {
const text = `
test('should create fallback subtasks for empty text', () => {
const emptyText = '';
const result = parseSubtasksFromText(emptyText, 1, 2, 5);
// Verify fallback subtasks structure
expect(result).toHaveLength(2);
expect(result[0]).toMatchObject({
id: 1,
title: 'Subtask 1',
description: 'Auto-generated fallback subtask',
status: 'pending',
dependencies: [],
parentTaskId: 5
});
expect(result[1]).toMatchObject({
id: 2,
title: 'Subtask 2',
description: 'Auto-generated fallback subtask',
status: 'pending',
dependencies: [],
parentTaskId: 5
});
});
test('should normalize subtask IDs', () => {
const text = `
[
{
"id": 10,
@@ -234,15 +240,15 @@ These subtasks will help you implement the parent task efficiently.`;
}
]`;
const result = parseSubtasksFromText(text, 1, 2, 5);
expect(result).toHaveLength(2);
expect(result[0].id).toBe(1); // Should normalize to starting ID
expect(result[1].id).toBe(2); // Should normalize to starting ID + 1
});
const result = parseSubtasksFromText(text, 1, 2, 5);
test('should convert string dependencies to numbers', () => {
const text = `
expect(result).toHaveLength(2);
expect(result[0].id).toBe(1); // Should normalize to starting ID
expect(result[1].id).toBe(2); // Should normalize to starting ID + 1
});
test('should convert string dependencies to numbers', () => {
const text = `
[
{
"id": 1,
@@ -260,140 +266,142 @@ These subtasks will help you implement the parent task efficiently.`;
}
]`;
const result = parseSubtasksFromText(text, 1, 2, 5);
expect(result[1].dependencies).toEqual([1]);
expect(typeof result[1].dependencies[0]).toBe('number');
});
const result = parseSubtasksFromText(text, 1, 2, 5);
test('should create fallback subtasks for invalid JSON', () => {
const text = `This is not valid JSON and cannot be parsed`;
expect(result[1].dependencies).toEqual([1]);
expect(typeof result[1].dependencies[0]).toBe('number');
});
const result = parseSubtasksFromText(text, 1, 2, 5);
// Verify fallback subtasks structure
expect(result).toHaveLength(2);
expect(result[0]).toMatchObject({
id: 1,
title: 'Subtask 1',
description: 'Auto-generated fallback subtask',
status: 'pending',
dependencies: [],
parentTaskId: 5
});
expect(result[1]).toMatchObject({
id: 2,
title: 'Subtask 2',
description: 'Auto-generated fallback subtask',
status: 'pending',
dependencies: [],
parentTaskId: 5
});
});
});
test('should create fallback subtasks for invalid JSON', () => {
const text = `This is not valid JSON and cannot be parsed`;
describe('handleClaudeError function', () => {
// Import the function directly for testing
let handleClaudeError;
beforeAll(async () => {
// Dynamic import to get the actual function
const module = await import('../../scripts/modules/ai-services.js');
handleClaudeError = module.handleClaudeError;
});
const result = parseSubtasksFromText(text, 1, 2, 5);
test('should handle overloaded_error type', () => {
const error = {
type: 'error',
error: {
type: 'overloaded_error',
message: 'Claude is experiencing high volume'
}
};
// Mock process.env to include PERPLEXITY_API_KEY
const originalEnv = process.env;
process.env = { ...originalEnv, PERPLEXITY_API_KEY: 'test-key' };
const result = handleClaudeError(error);
// Restore original env
process.env = originalEnv;
expect(result).toContain('Claude is currently overloaded');
expect(result).toContain('fall back to Perplexity AI');
});
// Verify fallback subtasks structure
expect(result).toHaveLength(2);
expect(result[0]).toMatchObject({
id: 1,
title: 'Subtask 1',
description: 'Auto-generated fallback subtask',
status: 'pending',
dependencies: [],
parentTaskId: 5
});
expect(result[1]).toMatchObject({
id: 2,
title: 'Subtask 2',
description: 'Auto-generated fallback subtask',
status: 'pending',
dependencies: [],
parentTaskId: 5
});
});
});
test('should handle rate_limit_error type', () => {
const error = {
type: 'error',
error: {
type: 'rate_limit_error',
message: 'Rate limit exceeded'
}
};
const result = handleClaudeError(error);
expect(result).toContain('exceeded the rate limit');
});
describe('handleClaudeError function', () => {
// Import the function directly for testing
let handleClaudeError;
test('should handle invalid_request_error type', () => {
const error = {
type: 'error',
error: {
type: 'invalid_request_error',
message: 'Invalid request parameters'
}
};
const result = handleClaudeError(error);
expect(result).toContain('issue with the request format');
});
beforeAll(async () => {
// Dynamic import to get the actual function
const module = await import('../../scripts/modules/ai-services.js');
handleClaudeError = module.handleClaudeError;
});
test('should handle timeout errors', () => {
const error = {
message: 'Request timed out after 60000ms'
};
const result = handleClaudeError(error);
expect(result).toContain('timed out');
});
test('should handle overloaded_error type', () => {
const error = {
type: 'error',
error: {
type: 'overloaded_error',
message: 'Claude is experiencing high volume'
}
};
test('should handle network errors', () => {
const error = {
message: 'Network error occurred'
};
const result = handleClaudeError(error);
expect(result).toContain('network error');
});
// Mock process.env to include PERPLEXITY_API_KEY
const originalEnv = process.env;
process.env = { ...originalEnv, PERPLEXITY_API_KEY: 'test-key' };
test('should handle generic errors', () => {
const error = {
message: 'Something unexpected happened'
};
const result = handleClaudeError(error);
expect(result).toContain('Error communicating with Claude');
expect(result).toContain('Something unexpected happened');
});
});
const result = handleClaudeError(error);
describe('Anthropic client configuration', () => {
test('should include output-128k beta header in client configuration', async () => {
// Read the file content to verify the change is present
const fs = await import('fs');
const path = await import('path');
const filePath = path.resolve('./scripts/modules/ai-services.js');
const fileContent = fs.readFileSync(filePath, 'utf8');
// Check if the beta header is in the file
expect(fileContent).toContain("'anthropic-beta': 'output-128k-2025-02-19'");
});
});
});
// Restore original env
process.env = originalEnv;
expect(result).toContain('Claude is currently overloaded');
expect(result).toContain('fall back to Perplexity AI');
});
test('should handle rate_limit_error type', () => {
const error = {
type: 'error',
error: {
type: 'rate_limit_error',
message: 'Rate limit exceeded'
}
};
const result = handleClaudeError(error);
expect(result).toContain('exceeded the rate limit');
});
test('should handle invalid_request_error type', () => {
const error = {
type: 'error',
error: {
type: 'invalid_request_error',
message: 'Invalid request parameters'
}
};
const result = handleClaudeError(error);
expect(result).toContain('issue with the request format');
});
test('should handle timeout errors', () => {
const error = {
message: 'Request timed out after 60000ms'
};
const result = handleClaudeError(error);
expect(result).toContain('timed out');
});
test('should handle network errors', () => {
const error = {
message: 'Network error occurred'
};
const result = handleClaudeError(error);
expect(result).toContain('network error');
});
test('should handle generic errors', () => {
const error = {
message: 'Something unexpected happened'
};
const result = handleClaudeError(error);
expect(result).toContain('Error communicating with Claude');
expect(result).toContain('Something unexpected happened');
});
});
describe('Anthropic client configuration', () => {
test('should include output-128k beta header in client configuration', async () => {
// Read the file content to verify the change is present
const fs = await import('fs');
const path = await import('path');
const filePath = path.resolve('./scripts/modules/ai-services.js');
const fileContent = fs.readFileSync(filePath, 'utf8');
// Check if the beta header is in the file
expect(fileContent).toContain(
"'anthropic-beta': 'output-128k-2025-02-19'"
);
});
});
});
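The last test only greps `ai-services.js` for the beta header string, so for context here is one plausible way such a header ends up on the client — a hedged sketch assuming the SDK's `defaultHeaders` constructor option, which may not match how the module actually wires it.

```js
// Hedged sketch; the real ai-services.js may construct the client differently.
import { Anthropic } from '@anthropic-ai/sdk';

const anthropic = new Anthropic({
	apiKey: process.env.ANTHROPIC_API_KEY,
	defaultHeaders: {
		'anthropic-beta': 'output-128k-2025-02-19' // the header the test asserts on
	}
});
```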

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -5,393 +5,396 @@ import os from 'os';
// Mock external modules
jest.mock('child_process', () => ({
execSync: jest.fn()
execSync: jest.fn()
}));
jest.mock('readline', () => ({
createInterface: jest.fn(() => ({
question: jest.fn(),
close: jest.fn()
}))
createInterface: jest.fn(() => ({
question: jest.fn(),
close: jest.fn()
}))
}));
// Mock figlet for banner display
jest.mock('figlet', () => ({
default: {
textSync: jest.fn(() => 'Task Master')
}
default: {
textSync: jest.fn(() => 'Task Master')
}
}));
// Mock console methods
jest.mock('console', () => ({
log: jest.fn(),
info: jest.fn(),
warn: jest.fn(),
error: jest.fn(),
clear: jest.fn()
log: jest.fn(),
info: jest.fn(),
warn: jest.fn(),
error: jest.fn(),
clear: jest.fn()
}));
describe('Windsurf Rules File Handling', () => {
let tempDir;
beforeEach(() => {
jest.clearAllMocks();
// Create a temporary directory for testing
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'task-master-test-'));
// Spy on fs methods
jest.spyOn(fs, 'writeFileSync').mockImplementation(() => {});
jest.spyOn(fs, 'readFileSync').mockImplementation((filePath) => {
if (filePath.toString().includes('.windsurfrules')) {
return 'Existing windsurf rules content';
}
return '{}';
});
jest.spyOn(fs, 'existsSync').mockImplementation((filePath) => {
// Mock specific file existence checks
if (filePath.toString().includes('package.json')) {
return true;
}
return false;
});
jest.spyOn(fs, 'mkdirSync').mockImplementation(() => {});
jest.spyOn(fs, 'copyFileSync').mockImplementation(() => {});
});
let tempDir;
afterEach(() => {
// Clean up the temporary directory
try {
fs.rmSync(tempDir, { recursive: true, force: true });
} catch (err) {
console.error(`Error cleaning up: ${err.message}`);
}
});
beforeEach(() => {
jest.clearAllMocks();
// Test function that simulates the behavior of .windsurfrules handling
function mockCopyTemplateFile(templateName, targetPath) {
if (templateName === 'windsurfrules') {
const filename = path.basename(targetPath);
if (filename === '.windsurfrules') {
if (fs.existsSync(targetPath)) {
// Should append content when file exists
const existingContent = fs.readFileSync(targetPath, 'utf8');
const updatedContent = existingContent.trim() +
'\n\n# Added by Claude Task Master - Development Workflow Rules\n\n' +
'New content';
fs.writeFileSync(targetPath, updatedContent);
return;
}
}
// If file doesn't exist, create it normally
fs.writeFileSync(targetPath, 'New content');
}
}
// Create a temporary directory for testing
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'task-master-test-'));
test('creates .windsurfrules when it does not exist', () => {
// Arrange
const targetPath = path.join(tempDir, '.windsurfrules');
// Act
mockCopyTemplateFile('windsurfrules', targetPath);
// Assert
expect(fs.writeFileSync).toHaveBeenCalledWith(targetPath, 'New content');
});
test('appends content to existing .windsurfrules', () => {
// Arrange
const targetPath = path.join(tempDir, '.windsurfrules');
const existingContent = 'Existing windsurf rules content';
// Override the existsSync mock just for this test
fs.existsSync.mockReturnValueOnce(true); // Target file exists
fs.readFileSync.mockReturnValueOnce(existingContent);
// Act
mockCopyTemplateFile('windsurfrules', targetPath);
// Assert
expect(fs.writeFileSync).toHaveBeenCalledWith(
targetPath,
expect.stringContaining(existingContent)
);
expect(fs.writeFileSync).toHaveBeenCalledWith(
targetPath,
expect.stringContaining('Added by Claude Task Master')
);
});
test('includes .windsurfrules in project structure creation', () => {
// This test verifies the expected behavior by using a mock implementation
// that represents how createProjectStructure should work
// Mock implementation of createProjectStructure
function mockCreateProjectStructure(projectName) {
// Copy template files including .windsurfrules
mockCopyTemplateFile('windsurfrules', path.join(tempDir, '.windsurfrules'));
}
// Act - call our mock implementation
mockCreateProjectStructure('test-project');
// Assert - verify that .windsurfrules was created
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.windsurfrules'),
expect.any(String)
);
});
// Spy on fs methods
jest.spyOn(fs, 'writeFileSync').mockImplementation(() => {});
jest.spyOn(fs, 'readFileSync').mockImplementation((filePath) => {
if (filePath.toString().includes('.windsurfrules')) {
return 'Existing windsurf rules content';
}
return '{}';
});
jest.spyOn(fs, 'existsSync').mockImplementation((filePath) => {
// Mock specific file existence checks
if (filePath.toString().includes('package.json')) {
return true;
}
return false;
});
jest.spyOn(fs, 'mkdirSync').mockImplementation(() => {});
jest.spyOn(fs, 'copyFileSync').mockImplementation(() => {});
});
afterEach(() => {
// Clean up the temporary directory
try {
fs.rmSync(tempDir, { recursive: true, force: true });
} catch (err) {
console.error(`Error cleaning up: ${err.message}`);
}
});
// Test function that simulates the behavior of .windsurfrules handling
function mockCopyTemplateFile(templateName, targetPath) {
if (templateName === 'windsurfrules') {
const filename = path.basename(targetPath);
if (filename === '.windsurfrules') {
if (fs.existsSync(targetPath)) {
// Should append content when file exists
const existingContent = fs.readFileSync(targetPath, 'utf8');
const updatedContent =
existingContent.trim() +
'\n\n# Added by Claude Task Master - Development Workflow Rules\n\n' +
'New content';
fs.writeFileSync(targetPath, updatedContent);
return;
}
}
// If file doesn't exist, create it normally
fs.writeFileSync(targetPath, 'New content');
}
}
test('creates .windsurfrules when it does not exist', () => {
// Arrange
const targetPath = path.join(tempDir, '.windsurfrules');
// Act
mockCopyTemplateFile('windsurfrules', targetPath);
// Assert
expect(fs.writeFileSync).toHaveBeenCalledWith(targetPath, 'New content');
});
test('appends content to existing .windsurfrules', () => {
// Arrange
const targetPath = path.join(tempDir, '.windsurfrules');
const existingContent = 'Existing windsurf rules content';
// Override the existsSync mock just for this test
fs.existsSync.mockReturnValueOnce(true); // Target file exists
fs.readFileSync.mockReturnValueOnce(existingContent);
// Act
mockCopyTemplateFile('windsurfrules', targetPath);
// Assert
expect(fs.writeFileSync).toHaveBeenCalledWith(
targetPath,
expect.stringContaining(existingContent)
);
expect(fs.writeFileSync).toHaveBeenCalledWith(
targetPath,
expect.stringContaining('Added by Claude Task Master')
);
});
test('includes .windsurfrules in project structure creation', () => {
// This test verifies the expected behavior by using a mock implementation
// that represents how createProjectStructure should work
// Mock implementation of createProjectStructure
function mockCreateProjectStructure(projectName) {
// Copy template files including .windsurfrules
mockCopyTemplateFile(
'windsurfrules',
path.join(tempDir, '.windsurfrules')
);
}
// Act - call our mock implementation
mockCreateProjectStructure('test-project');
// Assert - verify that .windsurfrules was created
expect(fs.writeFileSync).toHaveBeenCalledWith(
path.join(tempDir, '.windsurfrules'),
expect.any(String)
);
});
});
// New test suite for MCP Configuration Handling
describe('MCP Configuration Handling', () => {
let tempDir;
beforeEach(() => {
jest.clearAllMocks();
// Create a temporary directory for testing
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'task-master-test-'));
// Spy on fs methods
jest.spyOn(fs, 'writeFileSync').mockImplementation(() => {});
jest.spyOn(fs, 'readFileSync').mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return JSON.stringify({
"mcpServers": {
"existing-server": {
"command": "node",
"args": ["server.js"]
}
}
});
}
return '{}';
});
jest.spyOn(fs, 'existsSync').mockImplementation((filePath) => {
// Return true for specific paths to test different scenarios
if (filePath.toString().includes('package.json')) {
return true;
}
// Default to false for other paths
return false;
});
jest.spyOn(fs, 'mkdirSync').mockImplementation(() => {});
jest.spyOn(fs, 'copyFileSync').mockImplementation(() => {});
});
let tempDir;
afterEach(() => {
// Clean up the temporary directory
try {
fs.rmSync(tempDir, { recursive: true, force: true });
} catch (err) {
console.error(`Error cleaning up: ${err.message}`);
}
});
beforeEach(() => {
jest.clearAllMocks();
// Test function that simulates the behavior of setupMCPConfiguration
function mockSetupMCPConfiguration(targetDir, projectName) {
const mcpDirPath = path.join(targetDir, '.cursor');
const mcpJsonPath = path.join(mcpDirPath, 'mcp.json');
// Create .cursor directory if it doesn't exist
if (!fs.existsSync(mcpDirPath)) {
fs.mkdirSync(mcpDirPath, { recursive: true });
}
// New MCP config to be added - references the installed package
const newMCPServer = {
"task-master-ai": {
"command": "npx",
"args": [
"task-master-ai",
"mcp-server"
]
}
};
// Check if mcp.json already exists
if (fs.existsSync(mcpJsonPath)) {
try {
// Read existing config
const mcpConfig = JSON.parse(fs.readFileSync(mcpJsonPath, 'utf8'));
// Initialize mcpServers if it doesn't exist
if (!mcpConfig.mcpServers) {
mcpConfig.mcpServers = {};
}
// Add the taskmaster-ai server if it doesn't exist
if (!mcpConfig.mcpServers["task-master-ai"]) {
mcpConfig.mcpServers["task-master-ai"] = newMCPServer["task-master-ai"];
}
// Write the updated configuration
fs.writeFileSync(
mcpJsonPath,
JSON.stringify(mcpConfig, null, 4)
);
} catch (error) {
// Create new configuration on error
const newMCPConfig = {
"mcpServers": newMCPServer
};
fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
}
} else {
// If mcp.json doesn't exist, create it
const newMCPConfig = {
"mcpServers": newMCPServer
};
fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
}
}
// Create a temporary directory for testing
tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'task-master-test-'));
test('creates mcp.json when it does not exist', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('task-master-ai')
);
// Should create a proper structure with mcpServers key
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('mcpServers')
);
// Should reference npx command
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('npx')
);
});
test('updates existing mcp.json by adding new server', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Override the existsSync mock to simulate mcp.json exists
fs.existsSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return true;
}
return false;
});
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
// Should preserve existing server
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('existing-server')
);
// Should add our new server
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('task-master-ai')
);
});
test('handles JSON parsing errors by creating new mcp.json', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Override existsSync to say mcp.json exists
fs.existsSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return true;
}
return false;
});
// But make readFileSync return invalid JSON
fs.readFileSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return '{invalid json';
}
return '{}';
});
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
// Should create a new valid JSON file with our server
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('task-master-ai')
);
});
test('does not modify existing server configuration if it already exists', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Override existsSync to say mcp.json exists
fs.existsSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return true;
}
return false;
});
// Return JSON that already has task-master-ai
fs.readFileSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return JSON.stringify({
"mcpServers": {
"existing-server": {
"command": "node",
"args": ["server.js"]
},
"task-master-ai": {
"command": "custom",
"args": ["custom-args"]
}
}
});
}
return '{}';
});
// Spy to check what's written
const writeFileSyncSpy = jest.spyOn(fs, 'writeFileSync');
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
// Verify the written data contains the original taskmaster configuration
const dataWritten = JSON.parse(writeFileSyncSpy.mock.calls[0][1]);
expect(dataWritten.mcpServers["task-master-ai"].command).toBe("custom");
expect(dataWritten.mcpServers["task-master-ai"].args).toContain("custom-args");
});
test('creates the .cursor directory if it doesnt exist', () => {
// Arrange
const cursorDirPath = path.join(tempDir, '.cursor');
// Make sure it looks like the directory doesn't exist
fs.existsSync.mockReturnValue(false);
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
expect(fs.mkdirSync).toHaveBeenCalledWith(cursorDirPath, { recursive: true });
});
});
// Spy on fs methods
jest.spyOn(fs, 'writeFileSync').mockImplementation(() => {});
jest.spyOn(fs, 'readFileSync').mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return JSON.stringify({
mcpServers: {
'existing-server': {
command: 'node',
args: ['server.js']
}
}
});
}
return '{}';
});
jest.spyOn(fs, 'existsSync').mockImplementation((filePath) => {
// Return true for specific paths to test different scenarios
if (filePath.toString().includes('package.json')) {
return true;
}
// Default to false for other paths
return false;
});
jest.spyOn(fs, 'mkdirSync').mockImplementation(() => {});
jest.spyOn(fs, 'copyFileSync').mockImplementation(() => {});
});
afterEach(() => {
// Clean up the temporary directory
try {
fs.rmSync(tempDir, { recursive: true, force: true });
} catch (err) {
console.error(`Error cleaning up: ${err.message}`);
}
});
// Test function that simulates the behavior of setupMCPConfiguration
function mockSetupMCPConfiguration(targetDir, projectName) {
const mcpDirPath = path.join(targetDir, '.cursor');
const mcpJsonPath = path.join(mcpDirPath, 'mcp.json');
// Create .cursor directory if it doesn't exist
if (!fs.existsSync(mcpDirPath)) {
fs.mkdirSync(mcpDirPath, { recursive: true });
}
// New MCP config to be added - references the installed package
const newMCPServer = {
'task-master-ai': {
command: 'npx',
args: ['task-master-ai', 'mcp-server']
}
};
// Check if mcp.json already exists
if (fs.existsSync(mcpJsonPath)) {
try {
// Read existing config
const mcpConfig = JSON.parse(fs.readFileSync(mcpJsonPath, 'utf8'));
// Initialize mcpServers if it doesn't exist
if (!mcpConfig.mcpServers) {
mcpConfig.mcpServers = {};
}
// Add the taskmaster-ai server if it doesn't exist
if (!mcpConfig.mcpServers['task-master-ai']) {
mcpConfig.mcpServers['task-master-ai'] =
newMCPServer['task-master-ai'];
}
// Write the updated configuration
fs.writeFileSync(mcpJsonPath, JSON.stringify(mcpConfig, null, 4));
} catch (error) {
// Create new configuration on error
const newMCPConfig = {
mcpServers: newMCPServer
};
fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
}
} else {
// If mcp.json doesn't exist, create it
const newMCPConfig = {
mcpServers: newMCPServer
};
fs.writeFileSync(mcpJsonPath, JSON.stringify(newMCPConfig, null, 4));
}
}
test('creates mcp.json when it does not exist', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('task-master-ai')
);
// Should create a proper structure with mcpServers key
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('mcpServers')
);
// Should reference npx command
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('npx')
);
});
test('updates existing mcp.json by adding new server', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Override the existsSync mock to simulate mcp.json exists
fs.existsSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return true;
}
return false;
});
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
// Should preserve existing server
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('existing-server')
);
// Should add our new server
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('task-master-ai')
);
});
test('handles JSON parsing errors by creating new mcp.json', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Override existsSync to say mcp.json exists
fs.existsSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return true;
}
return false;
});
// But make readFileSync return invalid JSON
fs.readFileSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return '{invalid json';
}
return '{}';
});
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
// Should create a new valid JSON file with our server
expect(fs.writeFileSync).toHaveBeenCalledWith(
mcpJsonPath,
expect.stringContaining('task-master-ai')
);
});
test('does not modify existing server configuration if it already exists', () => {
// Arrange
const mcpJsonPath = path.join(tempDir, '.cursor', 'mcp.json');
// Override existsSync to say mcp.json exists
fs.existsSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return true;
}
return false;
});
// Return JSON that already has task-master-ai
fs.readFileSync.mockImplementation((filePath) => {
if (filePath.toString().includes('mcp.json')) {
return JSON.stringify({
mcpServers: {
'existing-server': {
command: 'node',
args: ['server.js']
},
'task-master-ai': {
command: 'custom',
args: ['custom-args']
}
}
});
}
return '{}';
});
// Spy to check what's written
const writeFileSyncSpy = jest.spyOn(fs, 'writeFileSync');
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
// Verify the written data contains the original taskmaster configuration
const dataWritten = JSON.parse(writeFileSyncSpy.mock.calls[0][1]);
expect(dataWritten.mcpServers['task-master-ai'].command).toBe('custom');
expect(dataWritten.mcpServers['task-master-ai'].args).toContain(
'custom-args'
);
});
test('creates the .cursor directory if it doesnt exist', () => {
// Arrange
const cursorDirPath = path.join(tempDir, '.cursor');
// Make sure it looks like the directory doesn't exist
fs.existsSync.mockReturnValue(false);
// Act
mockSetupMCPConfiguration(tempDir, 'test-project');
// Assert
expect(fs.mkdirSync).toHaveBeenCalledWith(cursorDirPath, {
recursive: true
});
});
});
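For reference, when `mockSetupMCPConfiguration` merges the new server into an existing config, the `.cursor/mcp.json` it writes would look roughly like this (values taken from the mocks above; the real `setupMCPConfiguration` output may differ).

```json
{
    "mcpServers": {
        "existing-server": {
            "command": "node",
            "args": ["server.js"]
        },
        "task-master-ai": {
            "command": "npx",
            "args": ["task-master-ai", "mcp-server"]
        }
    }
}
```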

View File

@@ -7,114 +7,126 @@ import { toKebabCase } from '../../scripts/modules/utils.js';
// Create a test implementation of detectCamelCaseFlags
function testDetectCamelCaseFlags(args) {
const camelCaseFlags = [];
for (const arg of args) {
if (arg.startsWith('--')) {
const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =
// Skip single-word flags - they can't be camelCase
if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
continue;
}
// Check for camelCase pattern (lowercase followed by uppercase)
if (/[a-z][A-Z]/.test(flagName)) {
const kebabVersion = toKebabCase(flagName);
if (kebabVersion !== flagName) {
camelCaseFlags.push({
original: flagName,
kebabCase: kebabVersion
});
}
}
}
}
return camelCaseFlags;
const camelCaseFlags = [];
for (const arg of args) {
if (arg.startsWith('--')) {
const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =
// Skip single-word flags - they can't be camelCase
if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
continue;
}
// Check for camelCase pattern (lowercase followed by uppercase)
if (/[a-z][A-Z]/.test(flagName)) {
const kebabVersion = toKebabCase(flagName);
if (kebabVersion !== flagName) {
camelCaseFlags.push({
original: flagName,
kebabCase: kebabVersion
});
}
}
}
}
return camelCaseFlags;
}
describe('Kebab Case Validation', () => {
describe('toKebabCase', () => {
test('should convert camelCase to kebab-case', () => {
expect(toKebabCase('promptText')).toBe('prompt-text');
expect(toKebabCase('userID')).toBe('user-id');
expect(toKebabCase('numTasks')).toBe('num-tasks');
});
test('should handle already kebab-case strings', () => {
expect(toKebabCase('already-kebab-case')).toBe('already-kebab-case');
expect(toKebabCase('kebab-case')).toBe('kebab-case');
});
test('should handle single words', () => {
expect(toKebabCase('single')).toBe('single');
expect(toKebabCase('file')).toBe('file');
});
});
describe('toKebabCase', () => {
test('should convert camelCase to kebab-case', () => {
expect(toKebabCase('promptText')).toBe('prompt-text');
expect(toKebabCase('userID')).toBe('user-id');
expect(toKebabCase('numTasks')).toBe('num-tasks');
});
describe('detectCamelCaseFlags', () => {
test('should properly detect camelCase flags', () => {
const args = ['node', 'task-master', 'add-task', '--promptText=test', '--userID=123'];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(2);
expect(flags).toContainEqual({
original: 'promptText',
kebabCase: 'prompt-text'
});
expect(flags).toContainEqual({
original: 'userID',
kebabCase: 'user-id'
});
});
test('should not flag kebab-case or lowercase flags', () => {
const args = ['node', 'task-master', 'add-task', '--prompt=test', '--user-id=123'];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(0);
});
test('should not flag any single-word flags regardless of case', () => {
const args = [
'node',
'task-master',
'add-task',
'--prompt=test', // lowercase
'--PROMPT=test', // uppercase
'--Prompt=test', // mixed case
'--file=test', // lowercase
'--FILE=test', // uppercase
'--File=test' // mixed case
];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(0);
});
test('should handle already kebab-case strings', () => {
expect(toKebabCase('already-kebab-case')).toBe('already-kebab-case');
expect(toKebabCase('kebab-case')).toBe('kebab-case');
});
test('should handle mixed case flags correctly', () => {
const args = [
'node',
'task-master',
'add-task',
'--prompt=test', // single word, should pass
'--promptText=test', // camelCase, should flag
'--prompt-text=test', // kebab-case, should pass
'--ID=123', // single word, should pass
'--userId=123', // camelCase, should flag
'--user-id=123' // kebab-case, should pass
];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(2);
expect(flags).toContainEqual({
original: 'promptText',
kebabCase: 'prompt-text'
});
expect(flags).toContainEqual({
original: 'userId',
kebabCase: 'user-id'
});
});
});
});
test('should handle single words', () => {
expect(toKebabCase('single')).toBe('single');
expect(toKebabCase('file')).toBe('file');
});
});
describe('detectCamelCaseFlags', () => {
test('should properly detect camelCase flags', () => {
const args = [
'node',
'task-master',
'add-task',
'--promptText=test',
'--userID=123'
];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(2);
expect(flags).toContainEqual({
original: 'promptText',
kebabCase: 'prompt-text'
});
expect(flags).toContainEqual({
original: 'userID',
kebabCase: 'user-id'
});
});
test('should not flag kebab-case or lowercase flags', () => {
const args = [
'node',
'task-master',
'add-task',
'--prompt=test',
'--user-id=123'
];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(0);
});
test('should not flag any single-word flags regardless of case', () => {
const args = [
'node',
'task-master',
'add-task',
'--prompt=test', // lowercase
'--PROMPT=test', // uppercase
'--Prompt=test', // mixed case
'--file=test', // lowercase
'--FILE=test', // uppercase
'--File=test' // mixed case
];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(0);
});
test('should handle mixed case flags correctly', () => {
const args = [
'node',
'task-master',
'add-task',
'--prompt=test', // single word, should pass
'--promptText=test', // camelCase, should flag
'--prompt-text=test', // kebab-case, should pass
'--ID=123', // single word, should pass
'--userId=123', // camelCase, should flag
'--user-id=123' // kebab-case, should pass
];
const flags = testDetectCamelCaseFlags(args);
expect(flags).toHaveLength(2);
expect(flags).toContainEqual({
original: 'promptText',
kebabCase: 'prompt-text'
});
expect(flags).toContainEqual({
original: 'userId',
kebabCase: 'user-id'
});
});
});
});
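The expectations above pin down `toKebabCase`'s behaviour for camelCase input, consecutive capitals, and strings that are already kebab-case. A minimal sketch consistent with those cases — an illustration only, since the real helper in `scripts/modules/utils.js` may be implemented differently — is:

```js
// Minimal sketch matching the test expectations above (illustrative only).
function toKebabCase(str) {
	return str
		.replace(/([a-z0-9])([A-Z])/g, '$1-$2') // promptText → prompt-Text
		.toLowerCase();
}

toKebabCase('promptText'); // 'prompt-text'
toKebabCase('userID'); // 'user-id'
toKebabCase('already-kebab-case'); // unchanged
```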

View File

@@ -6,45 +6,45 @@ import { findTaskById } from '../../scripts/modules/utils.js';
import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';
describe('Task Finder', () => {
describe('findTaskById function', () => {
test('should find a task by numeric ID', () => {
const task = findTaskById(sampleTasks.tasks, 2);
expect(task).toBeDefined();
expect(task.id).toBe(2);
expect(task.title).toBe('Create Core Functionality');
});
describe('findTaskById function', () => {
test('should find a task by numeric ID', () => {
const task = findTaskById(sampleTasks.tasks, 2);
expect(task).toBeDefined();
expect(task.id).toBe(2);
expect(task.title).toBe('Create Core Functionality');
});
test('should find a task by string ID', () => {
const task = findTaskById(sampleTasks.tasks, '2');
expect(task).toBeDefined();
expect(task.id).toBe(2);
});
test('should find a task by string ID', () => {
const task = findTaskById(sampleTasks.tasks, '2');
expect(task).toBeDefined();
expect(task.id).toBe(2);
});
test('should find a subtask using dot notation', () => {
const subtask = findTaskById(sampleTasks.tasks, '3.1');
expect(subtask).toBeDefined();
expect(subtask.id).toBe(1);
expect(subtask.title).toBe('Create Header Component');
});
test('should find a subtask using dot notation', () => {
const subtask = findTaskById(sampleTasks.tasks, '3.1');
expect(subtask).toBeDefined();
expect(subtask.id).toBe(1);
expect(subtask.title).toBe('Create Header Component');
});
test('should return null for non-existent task ID', () => {
const task = findTaskById(sampleTasks.tasks, 99);
expect(task).toBeNull();
});
test('should return null for non-existent task ID', () => {
const task = findTaskById(sampleTasks.tasks, 99);
expect(task).toBeNull();
});
test('should return null for non-existent subtask ID', () => {
const subtask = findTaskById(sampleTasks.tasks, '3.99');
expect(subtask).toBeNull();
});
test('should return null for non-existent subtask ID', () => {
const subtask = findTaskById(sampleTasks.tasks, '3.99');
expect(subtask).toBeNull();
});
test('should return null for non-existent parent task ID in subtask notation', () => {
const subtask = findTaskById(sampleTasks.tasks, '99.1');
expect(subtask).toBeNull();
});
test('should return null for non-existent parent task ID in subtask notation', () => {
const subtask = findTaskById(sampleTasks.tasks, '99.1');
expect(subtask).toBeNull();
});
test('should return null when tasks array is empty', () => {
const task = findTaskById(emptySampleTasks.tasks, 1);
expect(task).toBeNull();
});
});
});
test('should return null when tasks array is empty', () => {
const task = findTaskById(emptySampleTasks.tasks, 1);
expect(task).toBeNull();
});
});
});
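The dot-notation cases above suggest the lookup contract for `findTaskById`: a plain or string ID resolves a top-level task, `parent.sub` resolves a subtask, and every miss yields `null`. The sketch below illustrates that contract under those assumptions; the actual helper in `scripts/modules/utils.js` may handle more.

```js
// Illustrative sketch of the lookup behaviour the tests above exercise.
function findTaskById(tasks, id) {
	if (typeof id === 'string' && id.includes('.')) {
		const [parentId, subId] = id.split('.').map(Number);
		const parent = tasks.find((t) => t.id === parentId);
		return parent?.subtasks?.find((s) => s.id === subId) ?? null;
	}
	return tasks.find((t) => t.id === Number(id)) ?? null;
}
```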

File diff suppressed because it is too large

View File

@@ -3,226 +3,228 @@
*/
import { jest } from '@jest/globals';
import {
	getStatusWithColor,
	formatDependenciesWithStatus,
	createProgressBar,
	getComplexityWithColor
} from '../../scripts/modules/ui.js';
import { sampleTasks } from '../fixtures/sample-tasks.js';

// Mock dependencies
jest.mock('chalk', () => {
	const origChalkFn = (text) => text;
	const chalk = origChalkFn;
	chalk.green = (text) => text; // Return text as-is for status functions
	chalk.yellow = (text) => text;
	chalk.red = (text) => text;
	chalk.cyan = (text) => text;
	chalk.blue = (text) => text;
	chalk.gray = (text) => text;
	chalk.white = (text) => text;
	chalk.bold = (text) => text;
	chalk.dim = (text) => text;

	// Add hex and other methods
	chalk.hex = () => origChalkFn;
	chalk.rgb = () => origChalkFn;
	return chalk;
});

jest.mock('figlet', () => ({
	textSync: jest.fn(() => 'Task Master Banner')
}));

jest.mock('boxen', () => jest.fn((text) => `[boxed: ${text}]`));

jest.mock('ora', () =>
	jest.fn(() => ({
		start: jest.fn(),
		succeed: jest.fn(),
		fail: jest.fn(),
		stop: jest.fn()
	}))
);

jest.mock('cli-table3', () =>
	jest.fn().mockImplementation(() => ({
		push: jest.fn(),
		toString: jest.fn(() => 'Table Content')
	}))
);

jest.mock('gradient-string', () => jest.fn(() => jest.fn((text) => text)));

jest.mock('../../scripts/modules/utils.js', () => ({
	CONFIG: {
		projectName: 'Test Project',
		projectVersion: '1.0.0'
	},
	log: jest.fn(),
	findTaskById: jest.fn(),
	readJSON: jest.fn(),
	readComplexityReport: jest.fn(),
	truncate: jest.fn((text) => text)
}));

jest.mock('../../scripts/modules/task-manager.js', () => ({
	findNextTask: jest.fn(),
	analyzeTaskComplexity: jest.fn()
}));

describe('UI Module', () => {
	beforeEach(() => {
		jest.clearAllMocks();
	});

	describe('getStatusWithColor function', () => {
		test('should return done status with emoji for console output', () => {
			const result = getStatusWithColor('done');
			expect(result).toMatch(/done/);
			expect(result).toContain('✅');
		});

		test('should return pending status with emoji for console output', () => {
			const result = getStatusWithColor('pending');
			expect(result).toMatch(/pending/);
			expect(result).toContain('⏱️');
		});

		test('should return deferred status with emoji for console output', () => {
			const result = getStatusWithColor('deferred');
			expect(result).toMatch(/deferred/);
			expect(result).toContain('⏱️');
		});

		test('should return in-progress status with emoji for console output', () => {
			const result = getStatusWithColor('in-progress');
			expect(result).toMatch(/in-progress/);
			expect(result).toContain('🔄');
		});

		test('should return unknown status with emoji for console output', () => {
			const result = getStatusWithColor('unknown');
			expect(result).toMatch(/unknown/);
			expect(result).toContain('❌');
		});

		test('should use simple icons when forTable is true', () => {
			const doneResult = getStatusWithColor('done', true);
			expect(doneResult).toMatch(/done/);
			expect(doneResult).toContain('✓');

			const pendingResult = getStatusWithColor('pending', true);
			expect(pendingResult).toMatch(/pending/);
			expect(pendingResult).toContain('○');

			const inProgressResult = getStatusWithColor('in-progress', true);
			expect(inProgressResult).toMatch(/in-progress/);
			expect(inProgressResult).toContain('►');

			const deferredResult = getStatusWithColor('deferred', true);
			expect(deferredResult).toMatch(/deferred/);
			expect(deferredResult).toContain('x');
		});
	});

	describe('formatDependenciesWithStatus function', () => {
		test('should format dependencies as plain IDs when forConsole is false (default)', () => {
			const dependencies = [1, 2, 3];
			const allTasks = [
				{ id: 1, status: 'done' },
				{ id: 2, status: 'pending' },
				{ id: 3, status: 'deferred' }
			];

			const result = formatDependenciesWithStatus(dependencies, allTasks);

			// With recent changes, we expect just plain IDs when forConsole is false
			expect(result).toBe('1, 2, 3');
		});

		test('should format dependencies with status indicators when forConsole is true', () => {
			const dependencies = [1, 2, 3];
			const allTasks = [
				{ id: 1, status: 'done' },
				{ id: 2, status: 'pending' },
				{ id: 3, status: 'deferred' }
			];

			const result = formatDependenciesWithStatus(dependencies, allTasks, true);

			// We can't test for exact color formatting due to our chalk mocks
			// Instead, test that the result contains all the expected IDs
			expect(result).toContain('1');
			expect(result).toContain('2');
			expect(result).toContain('3');

			// Test that it's a comma-separated list
			expect(result.split(', ').length).toBe(3);
		});

		test('should return "None" for empty dependencies', () => {
			const result = formatDependenciesWithStatus([], []);
			expect(result).toBe('None');
		});

		test('should handle missing tasks in the task list', () => {
			const dependencies = [1, 999];
			const allTasks = [{ id: 1, status: 'done' }];

			const result = formatDependenciesWithStatus(dependencies, allTasks);
			expect(result).toBe('1, 999 (Not found)');
		});
	});

	describe('createProgressBar function', () => {
		test('should create a progress bar with the correct percentage', () => {
			const result = createProgressBar(50, 10);
			expect(result).toBe('█████░░░░░ 50%');
		});

		test('should handle 0% progress', () => {
			const result = createProgressBar(0, 10);
			expect(result).toBe('░░░░░░░░░░ 0%');
		});

		test('should handle 100% progress', () => {
			const result = createProgressBar(100, 10);
			expect(result).toBe('██████████ 100%');
		});

		test('should handle invalid percentages by clamping', () => {
			const result1 = createProgressBar(0, 10); // -10 should clamp to 0
			expect(result1).toBe('░░░░░░░░░░ 0%');

			const result2 = createProgressBar(100, 10); // 150 should clamp to 100
			expect(result2).toBe('██████████ 100%');
		});
	});

	describe('getComplexityWithColor function', () => {
		test('should return high complexity in red', () => {
			const result = getComplexityWithColor(8);
			expect(result).toMatch(/8/);
			expect(result).toContain('🔴');
		});

		test('should return medium complexity in yellow', () => {
			const result = getComplexityWithColor(5);
			expect(result).toMatch(/5/);
			expect(result).toContain('🟡');
		});

		test('should return low complexity in green', () => {
			const result = getComplexityWithColor(3);
			expect(result).toMatch(/3/);
			expect(result).toContain('🟢');
		});

		test('should handle non-numeric inputs', () => {
			const result = getComplexityWithColor('high');
			expect(result).toMatch(/high/);
			expect(result).toContain('🔴');
		});
	});
});
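The createProgressBar expectations fully determine the output format for a bar of length 10. A minimal sketch consistent with those strings is shown below; the signature and clamping behaviour are inferred from the tests, and the actual code in scripts/modules/ui.js may differ:

// Hypothetical sketch, not the ui.js source — shape inferred from the tests above.
function createProgressBar(percent, length) {
	// Clamp out-of-range values, as the clamping test expects
	const clamped = Math.max(0, Math.min(100, percent));
	const filled = Math.round((clamped / 100) * length);
	return '█'.repeat(filled) + '░'.repeat(length - filled) + ` ${clamped}%`;
}

// createProgressBar(50, 10) -> '█████░░░░░ 50%'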

File diff suppressed because it is too large