Compare commits

...

4 Commits

Author SHA1 Message Date
Ralph Khreish
821c8f44ba chore: remove license duplicate 2025-04-09 00:45:39 +02:00
Ralph Khreish
ab6746a0c0 chore: add prettier package 2025-04-09 00:30:05 +02:00
Ralph Khreish
c02483bc41 chore: run npm run format 2025-04-09 00:30:05 +02:00
Ralph Khreish
3148b57f1b chore: add prettier config 2025-04-09 00:30:05 +02:00
113 changed files with 36259 additions and 32275 deletions

View File

@@ -1,20 +1,18 @@
{
"mcpServers": {
"taskmaster-ai": {
"command": "node",
"args": [
"./mcp-server/server.js"
],
"env": {
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
"MODEL": "claude-3-7-sonnet-20250219",
"PERPLEXITY_MODEL": "sonar-pro",
"MAX_TOKENS": 128000,
"TEMPERATURE": 0.2,
"DEFAULT_SUBTASKS": 5,
"DEFAULT_PRIORITY": "medium"
}
}
}
}
"mcpServers": {
"taskmaster-ai": {
"command": "node",
"args": ["./mcp-server/server.js"],
"env": {
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
"MODEL": "claude-3-7-sonnet-20250219",
"PERPLEXITY_MODEL": "sonar-pro",
"MAX_TOKENS": 128000,
"TEMPERATURE": 0.2,
"DEFAULT_SUBTASKS": 5,
"DEFAULT_PRIORITY": "medium"
}
}
}
}

View File

@@ -14,7 +14,7 @@ permissions:
contents: read
jobs:
build:
setup:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -24,21 +24,55 @@ jobs:
- uses: actions/setup-node@v4
with:
node-version: 20
cache: "npm"
cache: 'npm'
- name: Install Dependencies
id: install
run: npm ci
timeout-minutes: 2
- name: Cache node_modules
uses: actions/cache@v4
with:
path: |
node_modules
*/*/node_modules
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
path: node_modules
key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
- name: Install Dependencies
run: npm ci
timeout-minutes: 2
format-check:
needs: setup
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 20
- name: Restore node_modules
uses: actions/cache@v4
with:
path: node_modules
key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
- name: Format Check
run: npm run format-check
env:
FORCE_COLOR: 1
test:
needs: setup
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 20
- name: Restore node_modules
uses: actions/cache@v4
with:
path: node_modules
key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
- name: Run Tests
run: |
@@ -47,13 +81,13 @@ jobs:
NODE_ENV: test
CI: true
FORCE_COLOR: 1
timeout-minutes: 15
timeout-minutes: 10
- name: Upload Test Results
if: always()
uses: actions/upload-artifact@v4
with:
name: test-results-node
name: test-results
path: |
test-results
coverage

View File

@@ -14,7 +14,7 @@ jobs:
- uses: actions/setup-node@v4
with:
node-version: 20
cache: "npm"
cache: 'npm'
- name: Cache node_modules
uses: actions/cache@v4

.prettierignore Normal file (6 lines)
View File

@@ -0,0 +1,6 @@
# Ignore artifacts:
build
coverage
.changeset
tasks
package-lock.json

.prettierrc Normal file (11 lines)
View File

@@ -0,0 +1,11 @@
{
"printWidth": 80,
"tabWidth": 2,
"useTabs": true,
"semi": true,
"singleQuote": true,
"trailingComma": "none",
"bracketSpacing": true,
"arrowParens": "always",
"endOfLine": "lf"
}

View File

@@ -1,90 +0,0 @@
# Dual License
This project is licensed under two separate licenses:
1. [Business Source License 1.1](#business-source-license-11) (BSL 1.1) for commercial use of Task Master itself
2. [Apache License 2.0](#apache-license-20) for all other uses
## Business Source License 1.1
Terms: https://mariadb.com/bsl11/
Licensed Work: Task Master AI
Additional Use Grant: You may use Task Master AI to create and commercialize your own projects and products.
Change Date: 2025-03-30
Change License: None
The Licensed Work is subject to the Business Source License 1.1. If you are interested in using the Licensed Work in a way that competes directly with Task Master, please contact the licensors.
### Licensor
- Eyal Toledano (GitHub: @eyaltoledano)
- Ralph (GitHub: @Crunchyman-ralph)
### Commercial Use Restrictions
This license explicitly restricts certain commercial uses of Task Master AI to the Licensors listed above. Restricted commercial uses include:
1. Creating commercial products or services that directly compete with Task Master AI
2. Selling Task Master AI itself as a service
3. Offering Task Master AI's functionality as a commercial managed service
4. Reselling or redistributing Task Master AI for a fee
### Explicitly Permitted Uses
The following uses are explicitly allowed under this license:
1. Using Task Master AI to create and commercialize your own projects
2. Using Task Master AI in commercial environments for internal development
3. Building and selling products or services that were created using Task Master AI
4. Using Task Master AI for commercial development as long as you're not selling Task Master AI itself
### Additional Terms
1. The right to commercialize Task Master AI itself is exclusively reserved for the Licensors
2. No party may create commercial products that directly compete with Task Master AI without explicit written permission
3. Forks of this repository are subject to the same restrictions regarding direct competition
4. Contributors agree that their contributions will be subject to this same dual licensing structure
## Apache License 2.0
For all uses other than those restricted above. See [APACHE-LICENSE](./APACHE-LICENSE) for the full license text.
### Permitted Use Definition
You may use Task Master AI for any purpose, including commercial purposes, as long as you are not:
1. Creating a direct competitor to Task Master AI
2. Selling Task Master AI itself as a service
3. Redistributing Task Master AI for a fee
### Requirements for Use
1. You must include appropriate copyright notices
2. You must state significant changes made to the software
3. You must preserve all license notices
## Questions and Commercial Licensing
For questions about licensing or to inquire about commercial use that may compete with Task Master, please contact:
- Eyal Toledano (GitHub: @eyaltoledano)
- Ralph (GitHub: @Crunchyman-ralph)
## Examples
### ✅ Allowed Uses
- Using Task Master to create a commercial SaaS product
- Using Task Master in your company for development
- Creating and selling products that were built using Task Master
- Using Task Master to generate code for commercial projects
- Offering consulting services where you use Task Master
### ❌ Restricted Uses
- Creating a competing AI task management tool
- Selling access to Task Master as a service
- Creating a hosted version of Task Master
- Reselling Task Master's functionality

View File

@@ -58,6 +58,7 @@ This will prompt you for project details and set up a new project with the neces
### Important Notes
1. **ES Modules Configuration:**
- This project uses ES Modules (ESM) instead of CommonJS.
- This is set via `"type": "module"` in your package.json.
- Use `import/export` syntax instead of `require()`.
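For illustration, a minimal sketch of the ESM style the note above refers to; the module contents are hypothetical and not taken from this repository.

```javascript
// ESM syntax, enabled by "type": "module" in package.json
import { readFile } from 'fs/promises';

export async function loadTasks(path = 'tasks/tasks.json') {
	const raw = await readFile(path, 'utf8');
	return JSON.parse(raw);
}

// CommonJS equivalent that would NOT work here:
// const { readFile } = require('fs/promises');
```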

View File

@@ -26,22 +26,22 @@ MCP (Model Control Protocol) provides the easiest way to get started with Task M
```json
{
"mcpServers": {
"taskmaster-ai": {
"command": "npx",
"args": ["-y", "task-master-ai", "mcp-server"],
"env": {
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
"MODEL": "claude-3-7-sonnet-20250219",
"PERPLEXITY_MODEL": "sonar-pro",
"MAX_TOKENS": 128000,
"TEMPERATURE": 0.2,
"DEFAULT_SUBTASKS": 5,
"DEFAULT_PRIORITY": "medium"
}
}
}
"mcpServers": {
"taskmaster-ai": {
"command": "npx",
"args": ["-y", "task-master-ai", "mcp-server"],
"env": {
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
"MODEL": "claude-3-7-sonnet-20250219",
"PERPLEXITY_MODEL": "sonar-pro",
"MAX_TOKENS": 128000,
"TEMPERATURE": 0.2,
"DEFAULT_SUBTASKS": 5,
"DEFAULT_PRIORITY": "medium"
}
}
}
}
```

View File

@@ -21,9 +21,11 @@ In an AI-driven development process—particularly with tools like [Cursor](http
The script can be configured through environment variables in a `.env` file at the root of the project:
### Required Configuration
- `ANTHROPIC_API_KEY`: Your Anthropic API key for Claude
### Optional Configuration
- `MODEL`: Specify which Claude model to use (default: "claude-3-7-sonnet-20250219")
- `MAX_TOKENS`: Maximum tokens for model responses (default: 4000)
- `TEMPERATURE`: Temperature for model responses (default: 0.7)
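For illustration, a minimal sketch of how this env-based configuration might be read in Node, using the variable names and defaults listed above; the helper name is hypothetical.

```javascript
import dotenv from 'dotenv';

dotenv.config(); // loads the .env file at the project root

// Hypothetical helper showing the required/optional variables described above
function getConfig() {
	const { ANTHROPIC_API_KEY, MODEL, MAX_TOKENS, TEMPERATURE } = process.env;
	if (!ANTHROPIC_API_KEY) {
		throw new Error('ANTHROPIC_API_KEY is required');
	}
	return {
		apiKey: ANTHROPIC_API_KEY,
		model: MODEL || 'claude-3-7-sonnet-20250219',
		maxTokens: MAX_TOKENS !== undefined ? Number(MAX_TOKENS) : 4000,
		temperature: TEMPERATURE !== undefined ? Number(TEMPERATURE) : 0.7
	};
}
```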
@@ -38,9 +40,10 @@ The script can be configured through environment variables in a `.env` file at t
## How It Works
1. **`tasks.json`**:
- A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
- The `meta` field can store additional info like the project's name, version, or reference to the PRD.
1. **`tasks.json`**:
- A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
- The `meta` field can store additional info like the project's name, version, or reference to the PRD.
- Tasks can have `subtasks` for more detailed implementation steps.
- Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.
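For illustration, a minimal sketch of the `tasks.json` shape described above, written as a JavaScript object; every field value here is an invented example.

```javascript
// Hypothetical example of the structure described above
const tasksFile = {
	meta: { projectName: 'Example Project', version: '0.1.0' },
	tasks: [
		{
			id: 1,
			title: 'Set up project skeleton',
			description: 'Initialize the repository and tooling',
			status: 'done',
			dependencies: [],
			subtasks: []
		},
		{
			id: 2,
			title: 'Implement PRD parser',
			description: 'Parse the PRD into tasks',
			status: 'pending',
			dependencies: [1] // rendered with ✅ / ⏱️ indicators in the CLI
		}
	]
};
```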
@@ -50,7 +53,7 @@ The script can be configured through environment variables in a `.env` file at t
```bash
# If installed globally
task-master [command] [options]
# If using locally within the project
node scripts/dev.js [command] [options]
```
@@ -111,6 +114,7 @@ task-master update --file=custom-tasks.json --from=5 --prompt="Change database f
```
Notes:
- The `--prompt` parameter is required and should explain the changes or new context
- Only tasks that aren't marked as 'done' will be updated
- Tasks with ID >= the specified --from value will be updated
@@ -134,6 +138,7 @@ task-master set-status --id=1,2,3 --status=done
```
Notes:
- When marking a parent task as "done", all of its subtasks will automatically be marked as "done" as well
- Common status values are 'done', 'pending', and 'deferred', but any string is accepted
- You can specify multiple task IDs by separating them with commas
@@ -183,6 +188,7 @@ task-master clear-subtasks --all
```
Notes:
- After clearing subtasks, task files are automatically regenerated
- This is useful when you want to regenerate subtasks with a different approach
- Can be combined with the `expand` command to immediately generate new subtasks
@@ -198,6 +204,7 @@ The script integrates with two AI services:
The Perplexity integration uses the OpenAI client to connect to Perplexity's API, which provides enhanced research capabilities for generating more informed subtasks. If the Perplexity API is unavailable or encounters an error, the script will automatically fall back to using Anthropic's Claude.
To use the Perplexity integration:
1. Obtain a Perplexity API key
2. Add `PERPLEXITY_API_KEY` to your `.env` file
3. Optionally specify `PERPLEXITY_MODEL` in your `.env` file (default: "sonar-medium-online")
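For illustration, a rough sketch of the fallback behavior described above; `callPerplexity`, `callClaude`, and `log` are hypothetical placeholders supplied by the caller, not functions from this repository.

```javascript
// Hypothetical sketch: prefer Perplexity, fall back to Claude on any failure
async function researchWithFallback(prompt, { callPerplexity, callClaude, log }) {
	try {
		return await callPerplexity(prompt, {
			model: process.env.PERPLEXITY_MODEL || 'sonar-medium-online'
		});
	} catch (error) {
		log(`Perplexity unavailable (${error.message}); falling back to Claude`);
		return await callClaude(prompt);
	}
}
```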
@@ -206,6 +213,7 @@ To use the Perplexity integration:
## Logging
The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:
- `debug`: Detailed information, typically useful for troubleshooting
- `info`: Confirmation that things are working as expected (default)
- `warn`: Warning messages that don't prevent execution
@@ -228,17 +236,20 @@ task-master remove-dependency --id=<id> --depends-on=<id>
These commands:
1. **Allow precise dependency management**:
- Add dependencies between tasks with automatic validation
- Remove dependencies when they're no longer needed
- Update task files automatically after changes
2. **Include validation checks**:
- Prevent circular dependencies (a task depending on itself)
- Prevent duplicate dependencies
- Verify that both tasks exist before adding/removing dependencies
- Check if dependencies exist before attempting to remove them
3. **Provide clear feedback**:
- Success messages confirm when dependencies are added/removed
- Error messages explain why operations failed (if applicable)
@@ -263,6 +274,7 @@ task-master validate-dependencies --file=custom-tasks.json
```
This command:
- Scans all tasks and subtasks for non-existent dependencies
- Identifies potential self-dependencies (tasks referencing themselves)
- Reports all found issues without modifying files
@@ -284,6 +296,7 @@ task-master fix-dependencies --file=custom-tasks.json
```
This command:
1. **Validates all dependencies** across tasks and subtasks
2. **Automatically removes**:
- References to non-existent tasks and subtasks
@@ -321,6 +334,7 @@ task-master analyze-complexity --research
```
Notes:
- The command uses Claude to analyze each task's complexity (or Perplexity with --research flag)
- Tasks are scored on a scale of 1-10
- Each task receives a recommended number of subtasks based on DEFAULT_SUBTASKS configuration
@@ -345,33 +359,35 @@ task-master expand --id=8 --num=5 --prompt="Custom prompt"
```
When a complexity report exists:
- The `expand` command will use the recommended subtask count from the report (unless overridden)
- It will use the tailored expansion prompt from the report (unless a custom prompt is provided)
- When using `--all`, tasks are sorted by complexity score (highest first)
- The `--research` flag is preserved from the complexity analysis to expansion
The output report structure is:
```json
{
"meta": {
"generatedAt": "2023-06-15T12:34:56.789Z",
"tasksAnalyzed": 20,
"thresholdScore": 5,
"projectName": "Your Project Name",
"usedResearch": true
},
"complexityAnalysis": [
{
"taskId": 8,
"taskTitle": "Develop Implementation Drift Handling",
"complexityScore": 9.5,
"recommendedSubtasks": 6,
"expansionPrompt": "Create subtasks that handle detecting...",
"reasoning": "This task requires sophisticated logic...",
"expansionCommand": "task-master expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
},
// More tasks sorted by complexity score (highest first)
]
"meta": {
"generatedAt": "2023-06-15T12:34:56.789Z",
"tasksAnalyzed": 20,
"thresholdScore": 5,
"projectName": "Your Project Name",
"usedResearch": true
},
"complexityAnalysis": [
{
"taskId": 8,
"taskTitle": "Develop Implementation Drift Handling",
"complexityScore": 9.5,
"recommendedSubtasks": 6,
"expansionPrompt": "Create subtasks that handle detecting...",
"reasoning": "This task requires sophisticated logic...",
"expansionCommand": "task-master expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
}
// More tasks sorted by complexity score (highest first)
]
}
```
@@ -438,4 +454,4 @@ This command:
- Commands for working with subtasks
- For subtasks, provides a link to view the parent task
This command is particularly useful when you need to examine a specific task in detail before implementing it or when you want to check the status and details of a particular task.
This command is particularly useful when you need to examine a specific task in detail before implementing it or when you want to check the status and details of a particular task.

View File

@@ -20,11 +20,11 @@ const args = process.argv.slice(2);
// Spawn the init script with all arguments
const child = spawn('node', [initScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
stdio: 'inherit',
cwd: process.cwd()
});
// Handle exit
child.on('close', (code) => {
process.exit(code);
});
process.exit(code);
});

View File

@@ -44,30 +44,36 @@ const initScriptPath = resolve(__dirname, '../scripts/init.js');
// Helper function to run dev.js with arguments
function runDevScript(args) {
// Debug: Show the transformed arguments when DEBUG=1 is set
if (process.env.DEBUG === '1') {
console.error('\nDEBUG - CLI Wrapper Analysis:');
console.error('- Original command: ' + process.argv.join(' '));
console.error('- Transformed args: ' + args.join(' '));
console.error('- dev.js will receive: node ' + devScriptPath + ' ' + args.join(' ') + '\n');
}
// For testing: If TEST_MODE is set, just print args and exit
if (process.env.TEST_MODE === '1') {
console.log('Would execute:');
console.log(`node ${devScriptPath} ${args.join(' ')}`);
process.exit(0);
return;
}
const child = spawn('node', [devScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
// Debug: Show the transformed arguments when DEBUG=1 is set
if (process.env.DEBUG === '1') {
console.error('\nDEBUG - CLI Wrapper Analysis:');
console.error('- Original command: ' + process.argv.join(' '));
console.error('- Transformed args: ' + args.join(' '));
console.error(
'- dev.js will receive: node ' +
devScriptPath +
' ' +
args.join(' ') +
'\n'
);
}
// For testing: If TEST_MODE is set, just print args and exit
if (process.env.TEST_MODE === '1') {
console.log('Would execute:');
console.log(`node ${devScriptPath} ${args.join(' ')}`);
process.exit(0);
return;
}
const child = spawn('node', [devScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
}
// Helper function to detect camelCase and convert to kebab-case
@@ -79,228 +85,239 @@ const toKebabCase = (str) => str.replace(/([A-Z])/g, '-$1').toLowerCase();
* @returns {Function} Wrapper action function
*/
function createDevScriptAction(commandName) {
return (options, cmd) => {
// Check for camelCase flags and error out with helpful message
const camelCaseFlags = detectCamelCaseFlags(process.argv);
// If camelCase flags were found, show error and exit
if (camelCaseFlags.length > 0) {
console.error('\nError: Please use kebab-case for CLI flags:');
camelCaseFlags.forEach(flag => {
console.error(` Instead of: --${flag.original}`);
console.error(` Use: --${flag.kebabCase}`);
});
console.error('\nExample: task-master parse-prd --num-tasks=5 instead of --numTasks=5\n');
process.exit(1);
}
// Since we've ensured no camelCase flags, we can now just:
// 1. Start with the command name
const args = [commandName];
// 3. Get positional arguments and explicit flags from the command line
const commandArgs = [];
const positionals = new Set(); // Track positional args we've seen
// Find the command in raw process.argv to extract args
const commandIndex = process.argv.indexOf(commandName);
if (commandIndex !== -1) {
// Process all args after the command name
for (let i = commandIndex + 1; i < process.argv.length; i++) {
const arg = process.argv[i];
if (arg.startsWith('--')) {
// It's a flag - pass through as is
commandArgs.push(arg);
// Skip the next arg if this is a flag with a value (not --flag=value format)
if (!arg.includes('=') &&
i + 1 < process.argv.length &&
!process.argv[i+1].startsWith('--')) {
commandArgs.push(process.argv[++i]);
}
} else if (!positionals.has(arg)) {
// It's a positional argument we haven't seen
commandArgs.push(arg);
positionals.add(arg);
}
}
}
// Add all command line args we collected
args.push(...commandArgs);
// 4. Add default options from Commander if not specified on command line
// Track which options we've seen on the command line
const userOptions = new Set();
for (const arg of commandArgs) {
if (arg.startsWith('--')) {
// Extract option name (without -- and value)
const name = arg.split('=')[0].slice(2);
userOptions.add(name);
// Add the kebab-case version too, to prevent duplicates
const kebabName = name.replace(/([A-Z])/g, '-$1').toLowerCase();
userOptions.add(kebabName);
// Add the camelCase version as well
const camelName = kebabName.replace(/-([a-z])/g, (_, letter) => letter.toUpperCase());
userOptions.add(camelName);
}
}
// Add Commander-provided defaults for options not specified by user
Object.entries(options).forEach(([key, value]) => {
// Debug output to see what keys we're getting
if (process.env.DEBUG === '1') {
console.error(`DEBUG - Processing option: ${key} = ${value}`);
}
return (options, cmd) => {
// Check for camelCase flags and error out with helpful message
const camelCaseFlags = detectCamelCaseFlags(process.argv);
// Special case for numTasks > num-tasks (a known problem case)
if (key === 'numTasks') {
if (process.env.DEBUG === '1') {
console.error('DEBUG - Converting numTasks to num-tasks');
}
if (!userOptions.has('num-tasks') && !userOptions.has('numTasks')) {
args.push(`--num-tasks=${value}`);
}
return;
}
// Skip built-in Commander properties and options the user provided
if (['parent', 'commands', 'options', 'rawArgs'].includes(key) || userOptions.has(key)) {
return;
}
// Also check the kebab-case version of this key
const kebabKey = key.replace(/([A-Z])/g, '-$1').toLowerCase();
if (userOptions.has(kebabKey)) {
return;
}
// Add default values, using kebab-case for the parameter name
if (value !== undefined) {
if (typeof value === 'boolean') {
if (value === true) {
args.push(`--${kebabKey}`);
} else if (value === false && key === 'generate') {
args.push('--skip-generate');
}
} else {
// Always use kebab-case for option names
args.push(`--${kebabKey}=${value}`);
}
}
});
// Special handling for parent parameter (uses -p)
if (options.parent && !args.includes('-p') && !userOptions.has('parent')) {
args.push('-p', options.parent);
}
// Debug output for troubleshooting
if (process.env.DEBUG === '1') {
console.error('DEBUG - Command args:', commandArgs);
console.error('DEBUG - User options:', Array.from(userOptions));
console.error('DEBUG - Commander options:', options);
console.error('DEBUG - Final args:', args);
}
// Run the script with our processed args
runDevScript(args);
};
// If camelCase flags were found, show error and exit
if (camelCaseFlags.length > 0) {
console.error('\nError: Please use kebab-case for CLI flags:');
camelCaseFlags.forEach((flag) => {
console.error(` Instead of: --${flag.original}`);
console.error(` Use: --${flag.kebabCase}`);
});
console.error(
'\nExample: task-master parse-prd --num-tasks=5 instead of --numTasks=5\n'
);
process.exit(1);
}
// Since we've ensured no camelCase flags, we can now just:
// 1. Start with the command name
const args = [commandName];
// 3. Get positional arguments and explicit flags from the command line
const commandArgs = [];
const positionals = new Set(); // Track positional args we've seen
// Find the command in raw process.argv to extract args
const commandIndex = process.argv.indexOf(commandName);
if (commandIndex !== -1) {
// Process all args after the command name
for (let i = commandIndex + 1; i < process.argv.length; i++) {
const arg = process.argv[i];
if (arg.startsWith('--')) {
// It's a flag - pass through as is
commandArgs.push(arg);
// Skip the next arg if this is a flag with a value (not --flag=value format)
if (
!arg.includes('=') &&
i + 1 < process.argv.length &&
!process.argv[i + 1].startsWith('--')
) {
commandArgs.push(process.argv[++i]);
}
} else if (!positionals.has(arg)) {
// It's a positional argument we haven't seen
commandArgs.push(arg);
positionals.add(arg);
}
}
}
// Add all command line args we collected
args.push(...commandArgs);
// 4. Add default options from Commander if not specified on command line
// Track which options we've seen on the command line
const userOptions = new Set();
for (const arg of commandArgs) {
if (arg.startsWith('--')) {
// Extract option name (without -- and value)
const name = arg.split('=')[0].slice(2);
userOptions.add(name);
// Add the kebab-case version too, to prevent duplicates
const kebabName = name.replace(/([A-Z])/g, '-$1').toLowerCase();
userOptions.add(kebabName);
// Add the camelCase version as well
const camelName = kebabName.replace(/-([a-z])/g, (_, letter) =>
letter.toUpperCase()
);
userOptions.add(camelName);
}
}
// Add Commander-provided defaults for options not specified by user
Object.entries(options).forEach(([key, value]) => {
// Debug output to see what keys we're getting
if (process.env.DEBUG === '1') {
console.error(`DEBUG - Processing option: ${key} = ${value}`);
}
// Special case for numTasks > num-tasks (a known problem case)
if (key === 'numTasks') {
if (process.env.DEBUG === '1') {
console.error('DEBUG - Converting numTasks to num-tasks');
}
if (!userOptions.has('num-tasks') && !userOptions.has('numTasks')) {
args.push(`--num-tasks=${value}`);
}
return;
}
// Skip built-in Commander properties and options the user provided
if (
['parent', 'commands', 'options', 'rawArgs'].includes(key) ||
userOptions.has(key)
) {
return;
}
// Also check the kebab-case version of this key
const kebabKey = key.replace(/([A-Z])/g, '-$1').toLowerCase();
if (userOptions.has(kebabKey)) {
return;
}
// Add default values, using kebab-case for the parameter name
if (value !== undefined) {
if (typeof value === 'boolean') {
if (value === true) {
args.push(`--${kebabKey}`);
} else if (value === false && key === 'generate') {
args.push('--skip-generate');
}
} else {
// Always use kebab-case for option names
args.push(`--${kebabKey}=${value}`);
}
}
});
// Special handling for parent parameter (uses -p)
if (options.parent && !args.includes('-p') && !userOptions.has('parent')) {
args.push('-p', options.parent);
}
// Debug output for troubleshooting
if (process.env.DEBUG === '1') {
console.error('DEBUG - Command args:', commandArgs);
console.error('DEBUG - User options:', Array.from(userOptions));
console.error('DEBUG - Commander options:', options);
console.error('DEBUG - Final args:', args);
}
// Run the script with our processed args
runDevScript(args);
};
}
// Special case for the 'init' command which uses a different script
function registerInitCommand(program) {
program
.command('init')
.description('Initialize a new project')
.option('-y, --yes', 'Skip prompts and use default values')
.option('-n, --name <name>', 'Project name')
.option('-d, --description <description>', 'Project description')
.option('-v, --version <version>', 'Project version')
.option('-a, --author <author>', 'Author name')
.option('--skip-install', 'Skip installing dependencies')
.option('--dry-run', 'Show what would be done without making changes')
.action((options) => {
// Pass through any options to the init script
const args = ['--yes', 'name', 'description', 'version', 'author', 'skip-install', 'dry-run']
.filter(opt => options[opt])
.map(opt => {
if (opt === 'yes' || opt === 'skip-install' || opt === 'dry-run') {
return `--${opt}`;
}
return `--${opt}=${options[opt]}`;
});
const child = spawn('node', [initScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program
.command('init')
.description('Initialize a new project')
.option('-y, --yes', 'Skip prompts and use default values')
.option('-n, --name <name>', 'Project name')
.option('-d, --description <description>', 'Project description')
.option('-v, --version <version>', 'Project version')
.option('-a, --author <author>', 'Author name')
.option('--skip-install', 'Skip installing dependencies')
.option('--dry-run', 'Show what would be done without making changes')
.action((options) => {
// Pass through any options to the init script
const args = [
'--yes',
'name',
'description',
'version',
'author',
'skip-install',
'dry-run'
]
.filter((opt) => options[opt])
.map((opt) => {
if (opt === 'yes' || opt === 'skip-install' || opt === 'dry-run') {
return `--${opt}`;
}
return `--${opt}=${options[opt]}`;
});
const child = spawn('node', [initScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
}
// Set up the command-line interface
const program = new Command();
program
.name('task-master')
.description('Claude Task Master CLI')
.version(version)
.addHelpText('afterAll', () => {
// Use the same help display function as dev.js for consistency
displayHelp();
return ''; // Return empty string to prevent commander's default help
});
.name('task-master')
.description('Claude Task Master CLI')
.version(version)
.addHelpText('afterAll', () => {
// Use the same help display function as dev.js for consistency
displayHelp();
return ''; // Return empty string to prevent commander's default help
});
// Add custom help option to directly call our help display
program.helpOption('-h, --help', 'Display help information');
program.on('--help', () => {
displayHelp();
displayHelp();
});
// Add special case commands
registerInitCommand(program);
program
.command('dev')
.description('Run the dev.js script')
.action(() => {
const args = process.argv.slice(process.argv.indexOf('dev') + 1);
runDevScript(args);
});
.command('dev')
.description('Run the dev.js script')
.action(() => {
const args = process.argv.slice(process.argv.indexOf('dev') + 1);
runDevScript(args);
});
// Use a temporary Command instance to get all command definitions
const tempProgram = new Command();
registerCommands(tempProgram);
// For each command in the temp instance, add a modified version to our actual program
tempProgram.commands.forEach(cmd => {
if (['init', 'dev'].includes(cmd.name())) {
// Skip commands we've already defined specially
return;
}
// Create a new command with the same name and description
const newCmd = program
.command(cmd.name())
.description(cmd.description());
// Copy all options
cmd.options.forEach(opt => {
newCmd.option(
opt.flags,
opt.description,
opt.defaultValue
);
});
// Set the action to proxy to dev.js
newCmd.action(createDevScriptAction(cmd.name()));
tempProgram.commands.forEach((cmd) => {
if (['init', 'dev'].includes(cmd.name())) {
// Skip commands we've already defined specially
return;
}
// Create a new command with the same name and description
const newCmd = program.command(cmd.name()).description(cmd.description());
// Copy all options
cmd.options.forEach((opt) => {
newCmd.option(opt.flags, opt.description, opt.defaultValue);
});
// Set the action to proxy to dev.js
newCmd.action(createDevScriptAction(cmd.name()));
});
// Parse the command line arguments
@@ -308,47 +325,56 @@ program.parse(process.argv);
// Add global error handling for unknown commands and options
process.on('uncaughtException', (err) => {
// Check if this is a commander.js unknown option error
if (err.code === 'commander.unknownOption') {
const option = err.message.match(/'([^']+)'/)?.[1];
const commandArg = process.argv.find(arg => !arg.startsWith('-') &&
arg !== 'task-master' &&
!arg.includes('/') &&
arg !== 'node');
const command = commandArg || 'unknown';
console.error(chalk.red(`Error: Unknown option '${option}'`));
console.error(chalk.yellow(`Run 'task-master ${command} --help' to see available options for this command`));
process.exit(1);
}
// Check if this is a commander.js unknown command error
if (err.code === 'commander.unknownCommand') {
const command = err.message.match(/'([^']+)'/)?.[1];
console.error(chalk.red(`Error: Unknown command '${command}'`));
console.error(chalk.yellow(`Run 'task-master --help' to see available commands`));
process.exit(1);
}
// Handle other uncaught exceptions
console.error(chalk.red(`Error: ${err.message}`));
if (process.env.DEBUG === '1') {
console.error(err);
}
process.exit(1);
// Check if this is a commander.js unknown option error
if (err.code === 'commander.unknownOption') {
const option = err.message.match(/'([^']+)'/)?.[1];
const commandArg = process.argv.find(
(arg) =>
!arg.startsWith('-') &&
arg !== 'task-master' &&
!arg.includes('/') &&
arg !== 'node'
);
const command = commandArg || 'unknown';
console.error(chalk.red(`Error: Unknown option '${option}'`));
console.error(
chalk.yellow(
`Run 'task-master ${command} --help' to see available options for this command`
)
);
process.exit(1);
}
// Check if this is a commander.js unknown command error
if (err.code === 'commander.unknownCommand') {
const command = err.message.match(/'([^']+)'/)?.[1];
console.error(chalk.red(`Error: Unknown command '${command}'`));
console.error(
chalk.yellow(`Run 'task-master --help' to see available commands`)
);
process.exit(1);
}
// Handle other uncaught exceptions
console.error(chalk.red(`Error: ${err.message}`));
if (process.env.DEBUG === '1') {
console.error(err);
}
process.exit(1);
});
// Show help if no command was provided (just 'task-master' with no args)
if (process.argv.length <= 2) {
displayBanner();
displayHelp();
process.exit(0);
displayBanner();
displayHelp();
process.exit(0);
}
// Add exports at the end of the file
if (typeof module !== 'undefined') {
module.exports = {
detectCamelCaseFlags
};
}
module.exports = {
detectCamelCaseFlags
};
}

View File

@@ -41,39 +41,39 @@ Core functions should follow this pattern to support both CLI and MCP use:
* @returns {Object|undefined} - Returns data when source is 'mcp'
*/
function exampleFunction(param1, param2, options = {}) {
try {
// Skip UI for MCP
if (options.source !== 'mcp') {
displayBanner();
console.log(chalk.blue('Processing operation...'));
}
// Do the core business logic
const result = doSomething(param1, param2);
// For MCP, return structured data
if (options.source === 'mcp') {
return {
success: true,
data: result
};
}
// For CLI, display output
console.log(chalk.green('Operation completed successfully!'));
} catch (error) {
// Handle errors based on source
if (options.source === 'mcp') {
return {
success: false,
error: error.message
};
}
// CLI error handling
console.error(chalk.red(`Error: ${error.message}`));
process.exit(1);
}
try {
// Skip UI for MCP
if (options.source !== 'mcp') {
displayBanner();
console.log(chalk.blue('Processing operation...'));
}
// Do the core business logic
const result = doSomething(param1, param2);
// For MCP, return structured data
if (options.source === 'mcp') {
return {
success: true,
data: result
};
}
// For CLI, display output
console.log(chalk.green('Operation completed successfully!'));
} catch (error) {
// Handle errors based on source
if (options.source === 'mcp') {
return {
success: false,
error: error.message
};
}
// CLI error handling
console.error(chalk.red(`Error: ${error.message}`));
process.exit(1);
}
}
```
@@ -89,17 +89,17 @@ export const simpleFunction = adaptForMcp(originalFunction);
// Split implementation - completely different code paths for CLI vs MCP
export const complexFunction = sourceSplitFunction(
// CLI version with UI
function(param1, param2) {
displayBanner();
console.log(`Processing ${param1}...`);
// ... CLI implementation
},
// MCP version with structured return
function(param1, param2, options = {}) {
// ... MCP implementation
return { success: true, data };
}
// CLI version with UI
function (param1, param2) {
displayBanner();
console.log(`Processing ${param1}...`);
// ... CLI implementation
},
// MCP version with structured return
function (param1, param2, options = {}) {
// ... MCP implementation
return { success: true, data };
}
);
```
@@ -110,7 +110,7 @@ When adding new features, follow these steps to ensure CLI and MCP compatibility
1. **Implement Core Logic** in the appropriate module file
2. **Add Source Parameter Support** using the pattern above
3. **Add to task-master-core.js** to make it available for direct import
4. **Update Command Map** in `mcp-server/src/tools/utils.js`
4. **Update Command Map** in `mcp-server/src/tools/utils.js`
5. **Create Tool Implementation** in `mcp-server/src/tools/`
6. **Register the Tool** in `mcp-server/src/tools/index.js`
@@ -119,39 +119,39 @@ When adding new features, follow these steps to ensure CLI and MCP compatibility
```javascript
// In scripts/modules/task-manager.js
export async function newFeature(param1, param2, options = {}) {
try {
// Source-specific UI
if (options.source !== 'mcp') {
displayBanner();
console.log(chalk.blue('Running new feature...'));
}
// Shared core logic
const result = processFeature(param1, param2);
// Source-specific return handling
if (options.source === 'mcp') {
return {
success: true,
data: result
};
}
// CLI output
console.log(chalk.green('Feature completed successfully!'));
displayOutput(result);
} catch (error) {
// Error handling based on source
if (options.source === 'mcp') {
return {
success: false,
error: error.message
};
}
console.error(chalk.red(`Error: ${error.message}`));
process.exit(1);
}
try {
// Source-specific UI
if (options.source !== 'mcp') {
displayBanner();
console.log(chalk.blue('Running new feature...'));
}
// Shared core logic
const result = processFeature(param1, param2);
// Source-specific return handling
if (options.source === 'mcp') {
return {
success: true,
data: result
};
}
// CLI output
console.log(chalk.green('Feature completed successfully!'));
displayOutput(result);
} catch (error) {
// Error handling based on source
if (options.source === 'mcp') {
return {
success: false,
error: error.message
};
}
console.error(chalk.red(`Error: ${error.message}`));
process.exit(1);
}
}
```
@@ -163,12 +163,12 @@ import { newFeature } from '../../../scripts/modules/task-manager.js';
// Add to exports
export default {
// ... existing functions
async newFeature(args = {}, options = {}) {
const { param1, param2 } = args;
return executeFunction(newFeature, [param1, param2], options);
}
// ... existing functions
async newFeature(args = {}, options = {}) {
const { param1, param2 } = args;
return executeFunction(newFeature, [param1, param2], options);
}
};
```
@@ -177,8 +177,8 @@ export default {
```javascript
// In mcp-server/src/tools/utils.js
const commandMap = {
// ... existing mappings
'new-feature': 'newFeature'
// ... existing mappings
'new-feature': 'newFeature'
};
```
@@ -186,53 +186,53 @@ const commandMap = {
```javascript
// In mcp-server/src/tools/newFeature.js
import { z } from "zod";
import { z } from 'zod';
import {
executeTaskMasterCommand,
createContentResponse,
createErrorResponse,
} from "./utils.js";
executeTaskMasterCommand,
createContentResponse,
createErrorResponse
} from './utils.js';
export function registerNewFeatureTool(server) {
server.addTool({
name: "newFeature",
description: "Run the new feature",
parameters: z.object({
param1: z.string().describe("First parameter"),
param2: z.number().optional().describe("Second parameter"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z.string().describe("Root directory of the project")
}),
execute: async (args, { log }) => {
try {
log.info(`Running new feature with args: ${JSON.stringify(args)}`);
server.addTool({
name: 'newFeature',
description: 'Run the new feature',
parameters: z.object({
param1: z.string().describe('First parameter'),
param2: z.number().optional().describe('Second parameter'),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z.string().describe('Root directory of the project')
}),
execute: async (args, { log }) => {
try {
log.info(`Running new feature with args: ${JSON.stringify(args)}`);
const cmdArgs = [];
if (args.param1) cmdArgs.push(`--param1=${args.param1}`);
if (args.param2) cmdArgs.push(`--param2=${args.param2}`);
if (args.file) cmdArgs.push(`--file=${args.file}`);
const cmdArgs = [];
if (args.param1) cmdArgs.push(`--param1=${args.param1}`);
if (args.param2) cmdArgs.push(`--param2=${args.param2}`);
if (args.file) cmdArgs.push(`--file=${args.file}`);
const projectRoot = args.projectRoot;
const projectRoot = args.projectRoot;
// Execute the command
const result = await executeTaskMasterCommand(
"new-feature",
log,
cmdArgs,
projectRoot
);
// Execute the command
const result = await executeTaskMasterCommand(
'new-feature',
log,
cmdArgs,
projectRoot
);
if (!result.success) {
throw new Error(result.error);
}
if (!result.success) {
throw new Error(result.error);
}
return createContentResponse(result.stdout);
} catch (error) {
log.error(`Error in new feature: ${error.message}`);
return createErrorResponse(`Error in new feature: ${error.message}`);
}
},
});
return createContentResponse(result.stdout);
} catch (error) {
log.error(`Error in new feature: ${error.message}`);
return createErrorResponse(`Error in new feature: ${error.message}`);
}
}
});
}
```
@@ -240,11 +240,11 @@ export function registerNewFeatureTool(server) {
```javascript
// In mcp-server/src/tools/index.js
import { registerNewFeatureTool } from "./newFeature.js";
import { registerNewFeatureTool } from './newFeature.js';
export function registerTaskMasterTools(server) {
// ... existing registrations
registerNewFeatureTool(server);
// ... existing registrations
registerNewFeatureTool(server);
}
```
@@ -266,4 +266,4 @@ node mcp-server/tests/test-command.js newFeature
2. **Structured Data for MCP** - Return clean JSON objects from MCP source functions
3. **Consistent Error Handling** - Standardize error formats for both interfaces
4. **Documentation** - Update MCP tool documentation when adding new features
5. **Testing** - Test both CLI and MCP interfaces for any new or modified feature
5. **Testing** - Test both CLI and MCP interfaces for any new or modified feature

File diff suppressed because it is too large

View File

@@ -6,57 +6,55 @@ This document provides examples of how to use the new AI client utilities with A
```javascript
// In your direct function implementation:
import {
getAnthropicClientForMCP,
getModelConfig,
handleClaudeError
import {
getAnthropicClientForMCP,
getModelConfig,
handleClaudeError
} from '../utils/ai-client-utils.js';
export async function someAiOperationDirect(args, log, context) {
try {
// Initialize Anthropic client with session from context
const client = getAnthropicClientForMCP(context.session, log);
// Get model configuration with defaults or session overrides
const modelConfig = getModelConfig(context.session);
// Make API call with proper error handling
try {
const response = await client.messages.create({
model: modelConfig.model,
max_tokens: modelConfig.maxTokens,
temperature: modelConfig.temperature,
messages: [
{ role: 'user', content: 'Your prompt here' }
]
});
return {
success: true,
data: response
};
} catch (apiError) {
// Use helper to get user-friendly error message
const friendlyMessage = handleClaudeError(apiError);
return {
success: false,
error: {
code: 'AI_API_ERROR',
message: friendlyMessage
}
};
}
} catch (error) {
// Handle client initialization errors
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: error.message
}
};
}
try {
// Initialize Anthropic client with session from context
const client = getAnthropicClientForMCP(context.session, log);
// Get model configuration with defaults or session overrides
const modelConfig = getModelConfig(context.session);
// Make API call with proper error handling
try {
const response = await client.messages.create({
model: modelConfig.model,
max_tokens: modelConfig.maxTokens,
temperature: modelConfig.temperature,
messages: [{ role: 'user', content: 'Your prompt here' }]
});
return {
success: true,
data: response
};
} catch (apiError) {
// Use helper to get user-friendly error message
const friendlyMessage = handleClaudeError(apiError);
return {
success: false,
error: {
code: 'AI_API_ERROR',
message: friendlyMessage
}
};
}
} catch (error) {
// Handle client initialization errors
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: error.message
}
};
}
}
```
@@ -64,86 +62,85 @@ export async function someAiOperationDirect(args, log, context) {
```javascript
// In your MCP tool implementation:
import { AsyncOperationManager, StatusCodes } from '../../utils/async-operation-manager.js';
import {
AsyncOperationManager,
StatusCodes
} from '../../utils/async-operation-manager.js';
import { someAiOperationDirect } from '../../core/direct-functions/some-ai-operation.js';
export async function someAiOperation(args, context) {
const { session, mcpLog } = context;
const log = mcpLog || console;
try {
// Create operation description
const operationDescription = `AI operation: ${args.someParam}`;
// Start async operation
const operation = AsyncOperationManager.createOperation(
operationDescription,
async (reportProgress) => {
try {
// Initial progress report
reportProgress({
progress: 0,
status: 'Starting AI operation...'
});
// Call direct function with session and progress reporting
const result = await someAiOperationDirect(
args,
log,
{
reportProgress,
mcpLog: log,
session
}
);
// Final progress update
reportProgress({
progress: 100,
status: result.success ? 'Operation completed' : 'Operation failed',
result: result.data,
error: result.error
});
return result;
} catch (error) {
// Handle errors in the operation
reportProgress({
progress: 100,
status: 'Operation failed',
error: {
message: error.message,
code: error.code || 'OPERATION_FAILED'
}
});
throw error;
}
}
);
// Return immediate response with operation ID
return {
status: StatusCodes.ACCEPTED,
body: {
success: true,
message: 'Operation started',
operationId: operation.id
}
};
} catch (error) {
// Handle errors in the MCP tool
log.error(`Error in someAiOperation: ${error.message}`);
return {
status: StatusCodes.INTERNAL_SERVER_ERROR,
body: {
success: false,
error: {
code: 'OPERATION_FAILED',
message: error.message
}
}
};
}
const { session, mcpLog } = context;
const log = mcpLog || console;
try {
// Create operation description
const operationDescription = `AI operation: ${args.someParam}`;
// Start async operation
const operation = AsyncOperationManager.createOperation(
operationDescription,
async (reportProgress) => {
try {
// Initial progress report
reportProgress({
progress: 0,
status: 'Starting AI operation...'
});
// Call direct function with session and progress reporting
const result = await someAiOperationDirect(args, log, {
reportProgress,
mcpLog: log,
session
});
// Final progress update
reportProgress({
progress: 100,
status: result.success ? 'Operation completed' : 'Operation failed',
result: result.data,
error: result.error
});
return result;
} catch (error) {
// Handle errors in the operation
reportProgress({
progress: 100,
status: 'Operation failed',
error: {
message: error.message,
code: error.code || 'OPERATION_FAILED'
}
});
throw error;
}
}
);
// Return immediate response with operation ID
return {
status: StatusCodes.ACCEPTED,
body: {
success: true,
message: 'Operation started',
operationId: operation.id
}
};
} catch (error) {
// Handle errors in the MCP tool
log.error(`Error in someAiOperation: ${error.message}`);
return {
status: StatusCodes.INTERNAL_SERVER_ERROR,
body: {
success: false,
error: {
code: 'OPERATION_FAILED',
message: error.message
}
}
};
}
}
```
@@ -151,58 +148,56 @@ export async function someAiOperation(args, context) {
```javascript
// In your direct function:
import {
getPerplexityClientForMCP,
getBestAvailableAIModel
import {
getPerplexityClientForMCP,
getBestAvailableAIModel
} from '../utils/ai-client-utils.js';
export async function researchOperationDirect(args, log, context) {
try {
// Get the best AI model for this operation based on needs
const { type, client } = await getBestAvailableAIModel(
context.session,
{ requiresResearch: true },
log
);
// Report which model we're using
if (context.reportProgress) {
await context.reportProgress({
progress: 10,
status: `Using ${type} model for research...`
});
}
// Make API call based on the model type
if (type === 'perplexity') {
// Call Perplexity
const response = await client.chat.completions.create({
model: context.session?.env?.PERPLEXITY_MODEL || 'sonar-medium-online',
messages: [
{ role: 'user', content: args.researchQuery }
],
temperature: 0.1
});
return {
success: true,
data: response.choices[0].message.content
};
} else {
// Call Claude as fallback
// (Implementation depends on specific needs)
// ...
}
} catch (error) {
// Handle errors
return {
success: false,
error: {
code: 'RESEARCH_ERROR',
message: error.message
}
};
}
try {
// Get the best AI model for this operation based on needs
const { type, client } = await getBestAvailableAIModel(
context.session,
{ requiresResearch: true },
log
);
// Report which model we're using
if (context.reportProgress) {
await context.reportProgress({
progress: 10,
status: `Using ${type} model for research...`
});
}
// Make API call based on the model type
if (type === 'perplexity') {
// Call Perplexity
const response = await client.chat.completions.create({
model: context.session?.env?.PERPLEXITY_MODEL || 'sonar-medium-online',
messages: [{ role: 'user', content: args.researchQuery }],
temperature: 0.1
});
return {
success: true,
data: response.choices[0].message.content
};
} else {
// Call Claude as fallback
// (Implementation depends on specific needs)
// ...
}
} catch (error) {
// Handle errors
return {
success: false,
error: {
code: 'RESEARCH_ERROR',
message: error.message
}
};
}
}
```
@@ -214,9 +209,9 @@ import { getModelConfig } from '../utils/ai-client-utils.js';
// Using custom defaults for a specific operation
const operationDefaults = {
model: 'claude-3-haiku-20240307', // Faster, smaller model
maxTokens: 1000, // Lower token limit
temperature: 0.2 // Lower temperature for more deterministic output
model: 'claude-3-haiku-20240307', // Faster, smaller model
maxTokens: 1000, // Lower token limit
temperature: 0.2 // Lower temperature for more deterministic output
};
// Get model config with operation-specific defaults
@@ -224,30 +219,34 @@ const modelConfig = getModelConfig(context.session, operationDefaults);
// Now use modelConfig in your API calls
const response = await client.messages.create({
model: modelConfig.model,
max_tokens: modelConfig.maxTokens,
temperature: modelConfig.temperature,
// Other parameters...
model: modelConfig.model,
max_tokens: modelConfig.maxTokens,
temperature: modelConfig.temperature
// Other parameters...
});
```
## Best Practices
1. **Error Handling**:
- Always use try/catch blocks around both client initialization and API calls
- Use `handleClaudeError` to provide user-friendly error messages
- Return standardized error objects with code and message
2. **Progress Reporting**:
- Report progress at key points (starting, processing, completing)
- Include meaningful status messages
- Include error details in progress reports when failures occur
3. **Session Handling**:
- Always pass the session from the context to the AI client getters
- Use `getModelConfig` to respect user settings from session
4. **Model Selection**:
- Use `getBestAvailableAIModel` when you need to select between different models
- Set `requiresResearch: true` when you need Perplexity capabilities
@@ -255,4 +254,4 @@ const response = await client.messages.create({
- Create descriptive operation names
- Handle all errors within the operation function
- Return standardized results from direct functions
- Return immediate responses with operation IDs
- Return immediate responses with operation IDs

View File

@@ -14,22 +14,22 @@ MCP (Model Control Protocol) provides the easiest way to get started with Task M
```json
{
"mcpServers": {
"taskmaster-ai": {
"command": "npx",
"args": ["-y", "task-master-ai", "mcp-server"],
"env": {
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
"MODEL": "claude-3-7-sonnet-20250219",
"PERPLEXITY_MODEL": "sonar-pro",
"MAX_TOKENS": 128000,
"TEMPERATURE": 0.2,
"DEFAULT_SUBTASKS": 5,
"DEFAULT_PRIORITY": "medium"
}
}
}
"mcpServers": {
"taskmaster-ai": {
"command": "npx",
"args": ["-y", "task-master-ai", "mcp-server"],
"env": {
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
"MODEL": "claude-3-7-sonnet-20250219",
"PERPLEXITY_MODEL": "sonar-pro",
"MAX_TOKENS": 128000,
"TEMPERATURE": 0.2,
"DEFAULT_SUBTASKS": 5,
"DEFAULT_PRIORITY": "medium"
}
}
}
}
```

View File

@@ -1,41 +0,0 @@
import os
import json
# Path to Cursor's history folder
history_path = os.path.expanduser('~/Library/Application Support/Cursor/User/History')
# File to search for
target_file = 'tasks/tasks.json'
# Function to search through all entries.json files
def search_entries_for_file(history_path, target_file):
matching_folders = []
for folder in os.listdir(history_path):
folder_path = os.path.join(history_path, folder)
if not os.path.isdir(folder_path):
continue
# Look for entries.json
entries_file = os.path.join(folder_path, 'entries.json')
if not os.path.exists(entries_file):
continue
# Parse entries.json to find the resource key
with open(entries_file, 'r') as f:
data = json.load(f)
resource = data.get('resource', None)
if resource and target_file in resource:
matching_folders.append(folder_path)
return matching_folders
# Search for the target file
matching_folders = search_entries_for_file(history_path, target_file)
# Output the matching folders
if matching_folders:
print(f"Found {target_file} in the following folders:")
for folder in matching_folders:
print(folder)
else:
print(f"No matches found for {target_file}.")

190
index.js
View File

@@ -41,27 +41,27 @@ export const devScriptPath = resolve(__dirname, './scripts/dev.js');
// Export a function to initialize a new project programmatically
export const initProject = async (options = {}) => {
const init = await import('./scripts/init.js');
return init.initializeProject(options);
const init = await import('./scripts/init.js');
return init.initializeProject(options);
};
// Export a function to run init as a CLI command
export const runInitCLI = async () => {
// Using spawn to ensure proper handling of stdio and process exit
const child = spawn('node', [resolve(__dirname, './scripts/init.js')], {
stdio: 'inherit',
cwd: process.cwd()
});
return new Promise((resolve, reject) => {
child.on('close', (code) => {
if (code === 0) {
resolve();
} else {
reject(new Error(`Init script exited with code ${code}`));
}
});
});
// Using spawn to ensure proper handling of stdio and process exit
const child = spawn('node', [resolve(__dirname, './scripts/init.js')], {
stdio: 'inherit',
cwd: process.cwd()
});
return new Promise((resolve, reject) => {
child.on('close', (code) => {
if (code === 0) {
resolve();
} else {
reject(new Error(`Init script exited with code ${code}`));
}
});
});
};
// Export version information
@@ -69,81 +69,81 @@ export const version = packageJson.version;
// CLI implementation
if (import.meta.url === `file://${process.argv[1]}`) {
const program = new Command();
program
.name('task-master')
.description('Claude Task Master CLI')
.version(version);
program
.command('init')
.description('Initialize a new project')
.action(() => {
runInitCLI().catch(err => {
console.error('Init failed:', err.message);
process.exit(1);
});
});
program
.command('dev')
.description('Run the dev.js script')
.allowUnknownOption(true)
.action(() => {
const args = process.argv.slice(process.argv.indexOf('dev') + 1);
const child = spawn('node', [devScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
// Add shortcuts for common dev.js commands
program
.command('list')
.description('List all tasks')
.action(() => {
const child = spawn('node', [devScriptPath, 'list'], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program
.command('next')
.description('Show the next task to work on')
.action(() => {
const child = spawn('node', [devScriptPath, 'next'], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program
.command('generate')
.description('Generate task files')
.action(() => {
const child = spawn('node', [devScriptPath, 'generate'], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program.parse(process.argv);
}
const program = new Command();
program
.name('task-master')
.description('Claude Task Master CLI')
.version(version);
program
.command('init')
.description('Initialize a new project')
.action(() => {
runInitCLI().catch((err) => {
console.error('Init failed:', err.message);
process.exit(1);
});
});
program
.command('dev')
.description('Run the dev.js script')
.allowUnknownOption(true)
.action(() => {
const args = process.argv.slice(process.argv.indexOf('dev') + 1);
const child = spawn('node', [devScriptPath, ...args], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
// Add shortcuts for common dev.js commands
program
.command('list')
.description('List all tasks')
.action(() => {
const child = spawn('node', [devScriptPath, 'list'], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program
.command('next')
.description('Show the next task to work on')
.action(() => {
const child = spawn('node', [devScriptPath, 'next'], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program
.command('generate')
.description('Generate task files')
.action(() => {
const child = spawn('node', [devScriptPath, 'generate'], {
stdio: 'inherit',
cwd: process.cwd()
});
child.on('close', (code) => {
process.exit(code);
});
});
program.parse(process.argv);
}

View File

@@ -1,56 +1,56 @@
export default {
// Use Node.js environment for testing
testEnvironment: 'node',
// Automatically clear mock calls between every test
clearMocks: true,
// Indicates whether the coverage information should be collected while executing the test
collectCoverage: false,
// The directory where Jest should output its coverage files
coverageDirectory: 'coverage',
// A list of paths to directories that Jest should use to search for files in
roots: ['<rootDir>/tests'],
// The glob patterns Jest uses to detect test files
testMatch: [
'**/__tests__/**/*.js',
'**/?(*.)+(spec|test).js',
'**/tests/*.test.js'
],
// Transform files
transform: {},
// Disable transformations for node_modules
transformIgnorePatterns: ['/node_modules/'],
// Set moduleNameMapper for absolute paths
moduleNameMapper: {
'^@/(.*)$': '<rootDir>/$1'
},
// Setup module aliases
moduleDirectories: ['node_modules', '<rootDir>'],
// Configure test coverage thresholds
coverageThreshold: {
global: {
branches: 80,
functions: 80,
lines: 80,
statements: 80
}
},
// Generate coverage report in these formats
coverageReporters: ['text', 'lcov'],
// Verbose output
verbose: true,
// Setup file
setupFilesAfterEnv: ['<rootDir>/tests/setup.js']
};
// Use Node.js environment for testing
testEnvironment: 'node',
// Automatically clear mock calls between every test
clearMocks: true,
// Indicates whether the coverage information should be collected while executing the test
collectCoverage: false,
// The directory where Jest should output its coverage files
coverageDirectory: 'coverage',
// A list of paths to directories that Jest should use to search for files in
roots: ['<rootDir>/tests'],
// The glob patterns Jest uses to detect test files
testMatch: [
'**/__tests__/**/*.js',
'**/?(*.)+(spec|test).js',
'**/tests/*.test.js'
],
// Transform files
transform: {},
// Disable transformations for node_modules
transformIgnorePatterns: ['/node_modules/'],
// Set moduleNameMapper for absolute paths
moduleNameMapper: {
'^@/(.*)$': '<rootDir>/$1'
},
// Setup module aliases
moduleDirectories: ['node_modules', '<rootDir>'],
// Configure test coverage thresholds
coverageThreshold: {
global: {
branches: 80,
functions: 80,
lines: 80,
statements: 80
}
},
// Generate coverage report in these formats
coverageReporters: ['text', 'lcov'],
// Verbose output
verbose: true,
// Setup file
setupFilesAfterEnv: ['<rootDir>/tests/setup.js']
};
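A test only needs to live under tests/ and match one of the testMatch globs above to be picked up; a minimal hypothetical example:

// tests/example.test.js (hypothetical), matches '**/tests/*.test.js'
describe('example', () => {
	it('runs in the node test environment', () => {
		expect(1 + 1).toBe(2);
	});
});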

View File

@@ -1,8 +1,8 @@
#!/usr/bin/env node
import TaskMasterMCPServer from "./src/index.js";
import dotenv from "dotenv";
import logger from "./src/logger.js";
import TaskMasterMCPServer from './src/index.js';
import dotenv from 'dotenv';
import logger from './src/logger.js';
// Load environment variables
dotenv.config();
@@ -11,25 +11,25 @@ dotenv.config();
* Start the MCP server
*/
async function startServer() {
const server = new TaskMasterMCPServer();
const server = new TaskMasterMCPServer();
// Handle graceful shutdown
process.on("SIGINT", async () => {
await server.stop();
process.exit(0);
});
// Handle graceful shutdown
process.on('SIGINT', async () => {
await server.stop();
process.exit(0);
});
process.on("SIGTERM", async () => {
await server.stop();
process.exit(0);
});
process.on('SIGTERM', async () => {
await server.stop();
process.exit(0);
});
try {
await server.start();
} catch (error) {
logger.error(`Failed to start MCP server: ${error.message}`);
process.exit(1);
}
try {
await server.start();
} catch (error) {
logger.error(`Failed to start MCP server: ${error.message}`);
process.exit(1);
}
}
// Start the server

View File

@@ -2,84 +2,90 @@ import { jest } from '@jest/globals';
import { ContextManager } from '../context-manager.js';
describe('ContextManager', () => {
let contextManager;
let contextManager;
beforeEach(() => {
contextManager = new ContextManager({
maxCacheSize: 10,
ttl: 1000, // 1 second for testing
maxContextSize: 1000
});
});
beforeEach(() => {
contextManager = new ContextManager({
maxCacheSize: 10,
ttl: 1000, // 1 second for testing
maxContextSize: 1000
});
});
describe('getContext', () => {
it('should create a new context when not in cache', async () => {
const context = await contextManager.getContext('test-id', { test: true });
expect(context.id).toBe('test-id');
expect(context.metadata.test).toBe(true);
expect(contextManager.stats.misses).toBe(1);
expect(contextManager.stats.hits).toBe(0);
});
describe('getContext', () => {
it('should create a new context when not in cache', async () => {
const context = await contextManager.getContext('test-id', {
test: true
});
expect(context.id).toBe('test-id');
expect(context.metadata.test).toBe(true);
expect(contextManager.stats.misses).toBe(1);
expect(contextManager.stats.hits).toBe(0);
});
it('should return cached context when available', async () => {
// First call creates the context
await contextManager.getContext('test-id', { test: true });
// Second call should hit cache
const context = await contextManager.getContext('test-id', { test: true });
expect(context.id).toBe('test-id');
expect(context.metadata.test).toBe(true);
expect(contextManager.stats.hits).toBe(1);
expect(contextManager.stats.misses).toBe(1);
});
it('should return cached context when available', async () => {
// First call creates the context
await contextManager.getContext('test-id', { test: true });
it('should respect TTL settings', async () => {
// Create context
await contextManager.getContext('test-id', { test: true });
// Wait for TTL to expire
await new Promise(resolve => setTimeout(resolve, 1100));
// Should create new context
await contextManager.getContext('test-id', { test: true });
expect(contextManager.stats.misses).toBe(2);
expect(contextManager.stats.hits).toBe(0);
});
});
// Second call should hit cache
const context = await contextManager.getContext('test-id', {
test: true
});
expect(context.id).toBe('test-id');
expect(context.metadata.test).toBe(true);
expect(contextManager.stats.hits).toBe(1);
expect(contextManager.stats.misses).toBe(1);
});
describe('updateContext', () => {
it('should update existing context metadata', async () => {
await contextManager.getContext('test-id', { initial: true });
const updated = await contextManager.updateContext('test-id', { updated: true });
expect(updated.metadata.initial).toBe(true);
expect(updated.metadata.updated).toBe(true);
});
});
it('should respect TTL settings', async () => {
// Create context
await contextManager.getContext('test-id', { test: true });
describe('invalidateContext', () => {
it('should remove context from cache', async () => {
await contextManager.getContext('test-id', { test: true });
contextManager.invalidateContext('test-id', { test: true });
// Should be a cache miss
await contextManager.getContext('test-id', { test: true });
expect(contextManager.stats.invalidations).toBe(1);
expect(contextManager.stats.misses).toBe(2);
});
});
// Wait for TTL to expire
await new Promise((resolve) => setTimeout(resolve, 1100));
describe('getStats', () => {
it('should return current cache statistics', async () => {
await contextManager.getContext('test-id', { test: true });
const stats = contextManager.getStats();
expect(stats.hits).toBe(0);
expect(stats.misses).toBe(1);
expect(stats.invalidations).toBe(0);
expect(stats.size).toBe(1);
expect(stats.maxSize).toBe(10);
expect(stats.ttl).toBe(1000);
});
});
});
// Should create new context
await contextManager.getContext('test-id', { test: true });
expect(contextManager.stats.misses).toBe(2);
expect(contextManager.stats.hits).toBe(0);
});
});
describe('updateContext', () => {
it('should update existing context metadata', async () => {
await contextManager.getContext('test-id', { initial: true });
const updated = await contextManager.updateContext('test-id', {
updated: true
});
expect(updated.metadata.initial).toBe(true);
expect(updated.metadata.updated).toBe(true);
});
});
describe('invalidateContext', () => {
it('should remove context from cache', async () => {
await contextManager.getContext('test-id', { test: true });
contextManager.invalidateContext('test-id', { test: true });
// Should be a cache miss
await contextManager.getContext('test-id', { test: true });
expect(contextManager.stats.invalidations).toBe(1);
expect(contextManager.stats.misses).toBe(2);
});
});
describe('getStats', () => {
it('should return current cache statistics', async () => {
await contextManager.getContext('test-id', { test: true });
const stats = contextManager.getStats();
expect(stats.hits).toBe(0);
expect(stats.misses).toBe(1);
expect(stats.invalidations).toBe(0);
expect(stats.size).toBe(1);
expect(stats.maxSize).toBe(10);
expect(stats.ttl).toBe(1000);
});
});
});

View File

@@ -15,156 +15,157 @@ import { LRUCache } from 'lru-cache';
*/
export class ContextManager {
/**
* Create a new ContextManager instance
* @param {ContextManagerConfig} config - Configuration options
*/
constructor(config = {}) {
this.config = {
maxCacheSize: config.maxCacheSize || 1000,
ttl: config.ttl || 1000 * 60 * 5, // 5 minutes default
maxContextSize: config.maxContextSize || 4000
};
/**
* Create a new ContextManager instance
* @param {ContextManagerConfig} config - Configuration options
*/
constructor(config = {}) {
this.config = {
maxCacheSize: config.maxCacheSize || 1000,
ttl: config.ttl || 1000 * 60 * 5, // 5 minutes default
maxContextSize: config.maxContextSize || 4000
};
// Initialize LRU cache for context data
this.cache = new LRUCache({
max: this.config.maxCacheSize,
ttl: this.config.ttl,
updateAgeOnGet: true
});
// Initialize LRU cache for context data
this.cache = new LRUCache({
max: this.config.maxCacheSize,
ttl: this.config.ttl,
updateAgeOnGet: true
});
// Cache statistics
this.stats = {
hits: 0,
misses: 0,
invalidations: 0
};
}
// Cache statistics
this.stats = {
hits: 0,
misses: 0,
invalidations: 0
};
}
/**
* Create a new context or retrieve from cache
* @param {string} contextId - Unique identifier for the context
* @param {Object} metadata - Additional metadata for the context
* @returns {Object} Context object with metadata
*/
async getContext(contextId, metadata = {}) {
const cacheKey = this._getCacheKey(contextId, metadata);
// Try to get from cache first
const cached = this.cache.get(cacheKey);
if (cached) {
this.stats.hits++;
return cached;
}
/**
* Create a new context or retrieve from cache
* @param {string} contextId - Unique identifier for the context
* @param {Object} metadata - Additional metadata for the context
* @returns {Object} Context object with metadata
*/
async getContext(contextId, metadata = {}) {
const cacheKey = this._getCacheKey(contextId, metadata);
this.stats.misses++;
// Create new context if not in cache
const context = {
id: contextId,
metadata: {
...metadata,
created: new Date().toISOString()
}
};
// Try to get from cache first
const cached = this.cache.get(cacheKey);
if (cached) {
this.stats.hits++;
return cached;
}
// Cache the new context
this.cache.set(cacheKey, context);
return context;
}
this.stats.misses++;
/**
* Update an existing context
* @param {string} contextId - Context identifier
* @param {Object} updates - Updates to apply to the context
* @returns {Object} Updated context
*/
async updateContext(contextId, updates) {
const context = await this.getContext(contextId);
// Apply updates to context
Object.assign(context.metadata, updates);
// Update cache
const cacheKey = this._getCacheKey(contextId, context.metadata);
this.cache.set(cacheKey, context);
return context;
}
// Create new context if not in cache
const context = {
id: contextId,
metadata: {
...metadata,
created: new Date().toISOString()
}
};
/**
* Invalidate a context in the cache
* @param {string} contextId - Context identifier
* @param {Object} metadata - Metadata used in the cache key
*/
invalidateContext(contextId, metadata = {}) {
const cacheKey = this._getCacheKey(contextId, metadata);
this.cache.delete(cacheKey);
this.stats.invalidations++;
}
// Cache the new context
this.cache.set(cacheKey, context);
/**
* Get cached data associated with a specific key.
* Increments cache hit stats if found.
* @param {string} key - The cache key.
* @returns {any | undefined} The cached data or undefined if not found/expired.
*/
getCachedData(key) {
const cached = this.cache.get(key);
if (cached !== undefined) { // Check for undefined specifically, as null/false might be valid cached values
this.stats.hits++;
return cached;
}
this.stats.misses++;
return undefined;
}
return context;
}
/**
* Set data in the cache with a specific key.
* @param {string} key - The cache key.
* @param {any} data - The data to cache.
*/
setCachedData(key, data) {
this.cache.set(key, data);
}
/**
* Update an existing context
* @param {string} contextId - Context identifier
* @param {Object} updates - Updates to apply to the context
* @returns {Object} Updated context
*/
async updateContext(contextId, updates) {
const context = await this.getContext(contextId);
/**
* Invalidate a specific cache key.
* Increments invalidation stats.
* @param {string} key - The cache key to invalidate.
*/
invalidateCacheKey(key) {
this.cache.delete(key);
this.stats.invalidations++;
}
// Apply updates to context
Object.assign(context.metadata, updates);
/**
* Get cache statistics
* @returns {Object} Cache statistics
*/
getStats() {
return {
hits: this.stats.hits,
misses: this.stats.misses,
invalidations: this.stats.invalidations,
size: this.cache.size,
maxSize: this.config.maxCacheSize,
ttl: this.config.ttl
};
}
// Update cache
const cacheKey = this._getCacheKey(contextId, context.metadata);
this.cache.set(cacheKey, context);
/**
* Generate a cache key from context ID and metadata
* @private
* @deprecated No longer used for direct cache key generation outside the manager.
* Prefer generating specific keys in calling functions.
*/
_getCacheKey(contextId, metadata) {
// Kept for potential backward compatibility or internal use if needed later.
return `${contextId}:${JSON.stringify(metadata)}`;
}
return context;
}
/**
* Invalidate a context in the cache
* @param {string} contextId - Context identifier
* @param {Object} metadata - Metadata used in the cache key
*/
invalidateContext(contextId, metadata = {}) {
const cacheKey = this._getCacheKey(contextId, metadata);
this.cache.delete(cacheKey);
this.stats.invalidations++;
}
/**
* Get cached data associated with a specific key.
* Increments cache hit stats if found.
* @param {string} key - The cache key.
* @returns {any | undefined} The cached data or undefined if not found/expired.
*/
getCachedData(key) {
const cached = this.cache.get(key);
if (cached !== undefined) {
// Check for undefined specifically, as null/false might be valid cached values
this.stats.hits++;
return cached;
}
this.stats.misses++;
return undefined;
}
/**
* Set data in the cache with a specific key.
* @param {string} key - The cache key.
* @param {any} data - The data to cache.
*/
setCachedData(key, data) {
this.cache.set(key, data);
}
/**
* Invalidate a specific cache key.
* Increments invalidation stats.
* @param {string} key - The cache key to invalidate.
*/
invalidateCacheKey(key) {
this.cache.delete(key);
this.stats.invalidations++;
}
/**
* Get cache statistics
* @returns {Object} Cache statistics
*/
getStats() {
return {
hits: this.stats.hits,
misses: this.stats.misses,
invalidations: this.stats.invalidations,
size: this.cache.size,
maxSize: this.config.maxCacheSize,
ttl: this.config.ttl
};
}
/**
* Generate a cache key from context ID and metadata
* @private
* @deprecated No longer used for direct cache key generation outside the manager.
* Prefer generating specific keys in calling functions.
*/
_getCacheKey(contextId, metadata) {
// Kept for potential backward compatibility or internal use if needed later.
return `${contextId}:${JSON.stringify(metadata)}`;
}
}
// Export a singleton instance with default config
export const contextManager = new ContextManager();
export const contextManager = new ContextManager();
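A minimal usage sketch of the cache helpers defined above, using the exported singleton; the cache key and payload are made up for illustration:

// Illustrative only; key and payload are placeholders.
const key = 'tasksList:all';
let cached = contextManager.getCachedData(key); // counted as a miss the first time
if (cached === undefined) {
	cached = { tasks: [] }; // placeholder payload
	contextManager.setCachedData(key, cached);
}
console.log(contextManager.getStats()); // { hits, misses, invalidations, size, maxSize, ttl }
contextManager.invalidateCacheKey(key); // increments stats.invalidations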

View File

@@ -5,11 +5,14 @@
import { addDependency } from '../../../../scripts/modules/dependency-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
/**
* Direct function wrapper for addDependency with error handling.
*
*
* @param {Object} args - Command arguments
* @param {string|number} args.id - Task ID to add dependency to
* @param {string|number} args.dependsOn - Task ID that will become a dependency
@@ -19,67 +22,75 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules
* @returns {Promise<Object>} - Result object with success status and data/error information
*/
export async function addDependencyDirect(args, log) {
try {
log.info(`Adding dependency with args: ${JSON.stringify(args)}`);
// Validate required parameters
if (!args.id) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Task ID (id) is required'
}
};
}
if (!args.dependsOn) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Dependency ID (dependsOn) is required'
}
};
}
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Format IDs for the core function
const taskId = args.id.includes && args.id.includes('.') ? args.id : parseInt(args.id, 10);
const dependencyId = args.dependsOn.includes && args.dependsOn.includes('.') ? args.dependsOn : parseInt(args.dependsOn, 10);
log.info(`Adding dependency: task ${taskId} will depend on ${dependencyId}`);
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call the core function
await addDependency(tasksPath, taskId, dependencyId);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
message: `Successfully added dependency: Task ${taskId} now depends on ${dependencyId}`,
taskId: taskId,
dependencyId: dependencyId
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in addDependencyDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}
try {
log.info(`Adding dependency with args: ${JSON.stringify(args)}`);
// Validate required parameters
if (!args.id) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Task ID (id) is required'
}
};
}
if (!args.dependsOn) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Dependency ID (dependsOn) is required'
}
};
}
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Format IDs for the core function
const taskId =
args.id.includes && args.id.includes('.')
? args.id
: parseInt(args.id, 10);
const dependencyId =
args.dependsOn.includes && args.dependsOn.includes('.')
? args.dependsOn
: parseInt(args.dependsOn, 10);
log.info(
`Adding dependency: task ${taskId} will depend on ${dependencyId}`
);
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call the core function
await addDependency(tasksPath, taskId, dependencyId);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
message: `Successfully added dependency: Task ${taskId} now depends on ${dependencyId}`,
taskId: taskId,
dependencyId: dependencyId
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in addDependencyDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}
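For reference, a minimal sketch of invoking this wrapper outside the MCP tool layer; the console-backed log object and the task IDs are illustrative, and whether projectRoot is honoured depends on findTasksJsonPath:

// Illustrative call (ESM top-level await); IDs are examples only.
const log = { info: console.log, warn: console.warn, error: console.error };
const result = await addDependencyDirect(
	{ projectRoot: process.cwd(), id: '5', dependsOn: '3' },
	log
);
if (!result.success) {
	log.error(`${result.error.code}: ${result.error.message}`);
}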

View File

@@ -4,7 +4,10 @@
import { addSubtask } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
/**
* Add a subtask to an existing task
@@ -23,106 +26,118 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules
* @returns {Promise<{success: boolean, data?: Object, error?: string}>}
*/
export async function addSubtaskDirect(args, log) {
try {
log.info(`Adding subtask with args: ${JSON.stringify(args)}`);
if (!args.id) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Parent task ID is required'
}
};
}
// Either taskId or title must be provided
if (!args.taskId && !args.title) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Either taskId or title must be provided'
}
};
}
try {
log.info(`Adding subtask with args: ${JSON.stringify(args)}`);
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Parse dependencies if provided
let dependencies = [];
if (args.dependencies) {
dependencies = args.dependencies.split(',').map(id => {
// Handle both regular IDs and dot notation
return id.includes('.') ? id.trim() : parseInt(id.trim(), 10);
});
}
// Convert existingTaskId to a number if provided
const existingTaskId = args.taskId ? parseInt(args.taskId, 10) : null;
// Convert parent ID to a number
const parentId = parseInt(args.id, 10);
// Determine if we should generate files
const generateFiles = !args.skipGenerate;
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Case 1: Convert existing task to subtask
if (existingTaskId) {
log.info(`Converting task ${existingTaskId} to a subtask of ${parentId}`);
const result = await addSubtask(tasksPath, parentId, existingTaskId, null, generateFiles);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
message: `Task ${existingTaskId} successfully converted to a subtask of task ${parentId}`,
subtask: result
}
};
}
// Case 2: Create new subtask
else {
log.info(`Creating new subtask for parent task ${parentId}`);
const newSubtaskData = {
title: args.title,
description: args.description || '',
details: args.details || '',
status: args.status || 'pending',
dependencies: dependencies
};
const result = await addSubtask(tasksPath, parentId, null, newSubtaskData, generateFiles);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
message: `New subtask ${parentId}.${result.id} successfully created`,
subtask: result
}
};
}
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in addSubtaskDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}
if (!args.id) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Parent task ID is required'
}
};
}
// Either taskId or title must be provided
if (!args.taskId && !args.title) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Either taskId or title must be provided'
}
};
}
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Parse dependencies if provided
let dependencies = [];
if (args.dependencies) {
dependencies = args.dependencies.split(',').map((id) => {
// Handle both regular IDs and dot notation
return id.includes('.') ? id.trim() : parseInt(id.trim(), 10);
});
}
// Convert existingTaskId to a number if provided
const existingTaskId = args.taskId ? parseInt(args.taskId, 10) : null;
// Convert parent ID to a number
const parentId = parseInt(args.id, 10);
// Determine if we should generate files
const generateFiles = !args.skipGenerate;
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Case 1: Convert existing task to subtask
if (existingTaskId) {
log.info(`Converting task ${existingTaskId} to a subtask of ${parentId}`);
const result = await addSubtask(
tasksPath,
parentId,
existingTaskId,
null,
generateFiles
);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
message: `Task ${existingTaskId} successfully converted to a subtask of task ${parentId}`,
subtask: result
}
};
}
// Case 2: Create new subtask
else {
log.info(`Creating new subtask for parent task ${parentId}`);
const newSubtaskData = {
title: args.title,
description: args.description || '',
details: args.details || '',
status: args.status || 'pending',
dependencies: dependencies
};
const result = await addSubtask(
tasksPath,
parentId,
null,
newSubtaskData,
generateFiles
);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
message: `New subtask ${parentId}.${result.id} successfully created`,
subtask: result
}
};
}
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in addSubtaskDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}
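A similar illustrative sketch for this wrapper, reusing the console-backed log object from the previous example; here a brand-new subtask is created under task 5, and all field values are placeholders:

// Sketch only; pass taskId instead of title to convert an existing task.
const result = await addSubtaskDirect(
	{
		id: '5',
		title: 'Write unit tests',
		description: 'Cover the happy path',
		dependencies: '5.1,3',
		skipGenerate: true
	},
	log
);
if (result.success) log.info(result.data.message);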

View File

@@ -5,9 +5,19 @@
import { addTask } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import { getAnthropicClientForMCP, getModelConfig } from '../utils/ai-client-utils.js';
import { _buildAddTaskPrompt, parseTaskJsonResponse, _handleAnthropicStream } from '../../../../scripts/modules/ai-services.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import {
getAnthropicClientForMCP,
getModelConfig
} from '../utils/ai-client-utils.js';
import {
_buildAddTaskPrompt,
parseTaskJsonResponse,
_handleAnthropicStream
} from '../../../../scripts/modules/ai-services.js';
/**
* Direct function wrapper for adding a new task with error handling.
@@ -24,153 +34,162 @@ import { _buildAddTaskPrompt, parseTaskJsonResponse, _handleAnthropicStream } fr
* @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }
*/
export async function addTaskDirect(args, log, context = {}) {
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Check required parameters
if (!args.prompt) {
log.error('Missing required parameter: prompt');
disableSilentMode();
return {
success: false,
error: {
code: 'MISSING_PARAMETER',
message: 'The prompt parameter is required for adding a task'
}
};
}
// Extract and prepare parameters
const prompt = args.prompt;
const dependencies = Array.isArray(args.dependencies)
? args.dependencies
: (args.dependencies ? String(args.dependencies).split(',').map(id => parseInt(id.trim(), 10)) : []);
const priority = args.priority || 'medium';
log.info(`Adding new task with prompt: "${prompt}", dependencies: [${dependencies.join(', ')}], priority: ${priority}`);
// Extract context parameters for advanced functionality
// Commenting out reportProgress extraction
// const { reportProgress, session } = context;
const { session } = context; // Keep session
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Initialize AI client with session environment
let localAnthropic;
try {
localAnthropic = getAnthropicClientForMCP(session, log);
} catch (error) {
log.error(`Failed to initialize Anthropic client: ${error.message}`);
disableSilentMode();
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
}
};
}
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Get model configuration from session
const modelConfig = getModelConfig(session);
// Check required parameters
if (!args.prompt) {
log.error('Missing required parameter: prompt');
disableSilentMode();
return {
success: false,
error: {
code: 'MISSING_PARAMETER',
message: 'The prompt parameter is required for adding a task'
}
};
}
// Read existing tasks to provide context
let tasksData;
try {
const fs = await import('fs');
tasksData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
} catch (error) {
log.warn(`Could not read existing tasks for context: ${error.message}`);
tasksData = { tasks: [] };
}
// Extract and prepare parameters
const prompt = args.prompt;
const dependencies = Array.isArray(args.dependencies)
? args.dependencies
: args.dependencies
? String(args.dependencies)
.split(',')
.map((id) => parseInt(id.trim(), 10))
: [];
const priority = args.priority || 'medium';
// Build prompts for AI
const { systemPrompt, userPrompt } = _buildAddTaskPrompt(prompt, tasksData.tasks);
log.info(
`Adding new task with prompt: "${prompt}", dependencies: [${dependencies.join(', ')}], priority: ${priority}`
);
// Make the AI call using the streaming helper
let responseText;
try {
responseText = await _handleAnthropicStream(
localAnthropic,
{
model: modelConfig.model,
max_tokens: modelConfig.maxTokens,
temperature: modelConfig.temperature,
messages: [{ role: "user", content: userPrompt }],
system: systemPrompt
},
{
// reportProgress: context.reportProgress, // Commented out to prevent Cursor stroking out
mcpLog: log
}
);
} catch (error) {
log.error(`AI processing failed: ${error.message}`);
disableSilentMode();
return {
success: false,
error: {
code: 'AI_PROCESSING_ERROR',
message: `Failed to generate task with AI: ${error.message}`
}
};
}
// Extract context parameters for advanced functionality
// Commenting out reportProgress extraction
// const { reportProgress, session } = context;
const { session } = context; // Keep session
// Parse the AI response
let taskDataFromAI;
try {
taskDataFromAI = parseTaskJsonResponse(responseText);
} catch (error) {
log.error(`Failed to parse AI response: ${error.message}`);
disableSilentMode();
return {
success: false,
error: {
code: 'RESPONSE_PARSING_ERROR',
message: `Failed to parse AI response: ${error.message}`
}
};
}
// Call the addTask function with 'json' outputFormat to prevent console output when called via MCP
const newTaskId = await addTask(
tasksPath,
prompt,
dependencies,
priority,
{
// reportProgress, // Commented out
mcpLog: log,
session,
taskDataFromAI // Pass the parsed AI result
},
'json'
);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
taskId: newTaskId,
message: `Successfully added new task #${newTaskId}`
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in addTaskDirect: ${error.message}`);
return {
success: false,
error: {
code: 'ADD_TASK_ERROR',
message: error.message
}
};
}
}
// Initialize AI client with session environment
let localAnthropic;
try {
localAnthropic = getAnthropicClientForMCP(session, log);
} catch (error) {
log.error(`Failed to initialize Anthropic client: ${error.message}`);
disableSilentMode();
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
}
};
}
// Get model configuration from session
const modelConfig = getModelConfig(session);
// Read existing tasks to provide context
let tasksData;
try {
const fs = await import('fs');
tasksData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
} catch (error) {
log.warn(`Could not read existing tasks for context: ${error.message}`);
tasksData = { tasks: [] };
}
// Build prompts for AI
const { systemPrompt, userPrompt } = _buildAddTaskPrompt(
prompt,
tasksData.tasks
);
// Make the AI call using the streaming helper
let responseText;
try {
responseText = await _handleAnthropicStream(
localAnthropic,
{
model: modelConfig.model,
max_tokens: modelConfig.maxTokens,
temperature: modelConfig.temperature,
messages: [{ role: 'user', content: userPrompt }],
system: systemPrompt
},
{
// reportProgress: context.reportProgress, // Commented out to prevent Cursor stroking out
mcpLog: log
}
);
} catch (error) {
log.error(`AI processing failed: ${error.message}`);
disableSilentMode();
return {
success: false,
error: {
code: 'AI_PROCESSING_ERROR',
message: `Failed to generate task with AI: ${error.message}`
}
};
}
// Parse the AI response
let taskDataFromAI;
try {
taskDataFromAI = parseTaskJsonResponse(responseText);
} catch (error) {
log.error(`Failed to parse AI response: ${error.message}`);
disableSilentMode();
return {
success: false,
error: {
code: 'RESPONSE_PARSING_ERROR',
message: `Failed to parse AI response: ${error.message}`
}
};
}
// Call the addTask function with 'json' outputFormat to prevent console output when called via MCP
const newTaskId = await addTask(
tasksPath,
prompt,
dependencies,
priority,
{
// reportProgress, // Commented out
mcpLog: log,
session,
taskDataFromAI // Pass the parsed AI result
},
'json'
);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
taskId: newTaskId,
message: `Successfully added new task #${newTaskId}`
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in addTaskDirect: ${error.message}`);
return {
success: false,
error: {
code: 'ADD_TASK_ERROR',
message: error.message
}
};
}
}
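An illustrative call shape for this wrapper; log is the console-backed logger from the earlier sketches, and session is assumed to come from the MCP request context carrying the API keys the AI client needs:

// Sketch only; prompt, dependencies and priority are placeholders.
const result = await addTaskDirect(
	{ prompt: 'Add a settings page with theme selection', dependencies: '3,4', priority: 'high' },
	log,
	{ session }
);
if (result.success) {
	log.info(result.data.message); // "Successfully added new task #<id>"
}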

View File

@@ -4,7 +4,12 @@
import { analyzeTaskComplexity } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode, isSilentMode, readJSON } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode,
isSilentMode,
readJSON
} from '../../../../scripts/modules/utils.js';
import fs from 'fs';
import path from 'path';
@@ -22,135 +27,142 @@ import path from 'path';
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
*/
export async function analyzeTaskComplexityDirect(args, log, context = {}) {
const { session } = context; // Only extract session, not reportProgress
try {
log.info(`Analyzing task complexity with args: ${JSON.stringify(args)}`);
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Determine output path
let outputPath = args.output || 'scripts/task-complexity-report.json';
if (!path.isAbsolute(outputPath) && args.projectRoot) {
outputPath = path.join(args.projectRoot, outputPath);
}
log.info(`Analyzing task complexity from: ${tasksPath}`);
log.info(`Output report will be saved to: ${outputPath}`);
if (args.research) {
log.info('Using Perplexity AI for research-backed complexity analysis');
}
// Create options object for analyzeTaskComplexity
const options = {
file: tasksPath,
output: outputPath,
model: args.model,
threshold: args.threshold,
research: args.research === true
};
// Enable silent mode to prevent console logs from interfering with JSON response
const wasSilent = isSilentMode();
if (!wasSilent) {
enableSilentMode();
}
// Create a logWrapper that matches the expected mcpLog interface as specified in utilities.mdc
const logWrapper = {
info: (message, ...args) => log.info(message, ...args),
warn: (message, ...args) => log.warn(message, ...args),
error: (message, ...args) => log.error(message, ...args),
debug: (message, ...args) => log.debug && log.debug(message, ...args),
success: (message, ...args) => log.info(message, ...args) // Map success to info
};
try {
// Call the core function with session and logWrapper as mcpLog
await analyzeTaskComplexity(options, {
session,
mcpLog: logWrapper // Use the wrapper instead of passing log directly
});
} catch (error) {
log.error(`Error in analyzeTaskComplexity: ${error.message}`);
return {
success: false,
error: {
code: 'ANALYZE_ERROR',
message: `Error running complexity analysis: ${error.message}`
}
};
} finally {
// Always restore normal logging in finally block, but only if we enabled it
if (!wasSilent) {
disableSilentMode();
}
}
// Verify the report file was created
if (!fs.existsSync(outputPath)) {
return {
success: false,
error: {
code: 'ANALYZE_ERROR',
message: 'Analysis completed but no report file was created'
}
};
}
// Read the report file
let report;
try {
report = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
// Important: Handle different report formats
// The core function might return an array or an object with a complexityAnalysis property
const analysisArray = Array.isArray(report) ? report :
(report.complexityAnalysis || []);
// Count tasks by complexity
const highComplexityTasks = analysisArray.filter(t => t.complexityScore >= 8).length;
const mediumComplexityTasks = analysisArray.filter(t => t.complexityScore >= 5 && t.complexityScore < 8).length;
const lowComplexityTasks = analysisArray.filter(t => t.complexityScore < 5).length;
return {
success: true,
data: {
message: `Task complexity analysis complete. Report saved to ${outputPath}`,
reportPath: outputPath,
reportSummary: {
taskCount: analysisArray.length,
highComplexityTasks,
mediumComplexityTasks,
lowComplexityTasks
}
}
};
} catch (parseError) {
log.error(`Error parsing report file: ${parseError.message}`);
return {
success: false,
error: {
code: 'REPORT_PARSE_ERROR',
message: `Error parsing complexity report: ${parseError.message}`
}
};
}
} catch (error) {
// Make sure to restore normal logging even if there's an error
if (isSilentMode()) {
disableSilentMode();
}
log.error(`Error in analyzeTaskComplexityDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}
const { session } = context; // Only extract session, not reportProgress
try {
log.info(`Analyzing task complexity with args: ${JSON.stringify(args)}`);
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Determine output path
let outputPath = args.output || 'scripts/task-complexity-report.json';
if (!path.isAbsolute(outputPath) && args.projectRoot) {
outputPath = path.join(args.projectRoot, outputPath);
}
log.info(`Analyzing task complexity from: ${tasksPath}`);
log.info(`Output report will be saved to: ${outputPath}`);
if (args.research) {
log.info('Using Perplexity AI for research-backed complexity analysis');
}
// Create options object for analyzeTaskComplexity
const options = {
file: tasksPath,
output: outputPath,
model: args.model,
threshold: args.threshold,
research: args.research === true
};
// Enable silent mode to prevent console logs from interfering with JSON response
const wasSilent = isSilentMode();
if (!wasSilent) {
enableSilentMode();
}
// Create a logWrapper that matches the expected mcpLog interface as specified in utilities.mdc
const logWrapper = {
info: (message, ...args) => log.info(message, ...args),
warn: (message, ...args) => log.warn(message, ...args),
error: (message, ...args) => log.error(message, ...args),
debug: (message, ...args) => log.debug && log.debug(message, ...args),
success: (message, ...args) => log.info(message, ...args) // Map success to info
};
try {
// Call the core function with session and logWrapper as mcpLog
await analyzeTaskComplexity(options, {
session,
mcpLog: logWrapper // Use the wrapper instead of passing log directly
});
} catch (error) {
log.error(`Error in analyzeTaskComplexity: ${error.message}`);
return {
success: false,
error: {
code: 'ANALYZE_ERROR',
message: `Error running complexity analysis: ${error.message}`
}
};
} finally {
// Always restore normal logging in finally block, but only if we enabled it
if (!wasSilent) {
disableSilentMode();
}
}
// Verify the report file was created
if (!fs.existsSync(outputPath)) {
return {
success: false,
error: {
code: 'ANALYZE_ERROR',
message: 'Analysis completed but no report file was created'
}
};
}
// Read the report file
let report;
try {
report = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
// Important: Handle different report formats
// The core function might return an array or an object with a complexityAnalysis property
const analysisArray = Array.isArray(report)
? report
: report.complexityAnalysis || [];
// Count tasks by complexity
const highComplexityTasks = analysisArray.filter(
(t) => t.complexityScore >= 8
).length;
const mediumComplexityTasks = analysisArray.filter(
(t) => t.complexityScore >= 5 && t.complexityScore < 8
).length;
const lowComplexityTasks = analysisArray.filter(
(t) => t.complexityScore < 5
).length;
return {
success: true,
data: {
message: `Task complexity analysis complete. Report saved to ${outputPath}`,
reportPath: outputPath,
reportSummary: {
taskCount: analysisArray.length,
highComplexityTasks,
mediumComplexityTasks,
lowComplexityTasks
}
}
};
} catch (parseError) {
log.error(`Error parsing report file: ${parseError.message}`);
return {
success: false,
error: {
code: 'REPORT_PARSE_ERROR',
message: `Error parsing complexity report: ${parseError.message}`
}
};
}
} catch (error) {
// Make sure to restore normal logging even if there's an error
if (isSilentMode()) {
disableSilentMode();
}
log.error(`Error in analyzeTaskComplexityDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}
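A sketch of a research-backed run of this wrapper; the threshold value and the use of the default report path are illustrative:

// Sketch only; session must expose the API keys needed for research mode.
const result = await analyzeTaskComplexityDirect(
	{ projectRoot: process.cwd(), threshold: 6, research: true },
	log,
	{ session }
);
if (result.success) {
	log.info(`Report at ${result.data.reportPath}`, result.data.reportSummary);
}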

View File

@@ -12,21 +12,21 @@ import { contextManager } from '../context-manager.js';
* @returns {Object} - Cache statistics
*/
export async function getCacheStatsDirect(args, log) {
try {
log.info('Retrieving cache statistics');
const stats = contextManager.getStats();
return {
success: true,
data: stats
};
} catch (error) {
log.error(`Error getting cache stats: ${error.message}`);
return {
success: false,
error: {
code: 'CACHE_STATS_ERROR',
message: error.message || 'Unknown error occurred'
}
};
}
}
try {
log.info('Retrieving cache statistics');
const stats = contextManager.getStats();
return {
success: true,
data: stats
};
} catch (error) {
log.error(`Error getting cache stats: ${error.message}`);
return {
success: false,
error: {
code: 'CACHE_STATS_ERROR',
message: error.message || 'Unknown error occurred'
}
};
}
}
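A one-line illustrative call; the data payload mirrors contextManager.getStats():

const stats = await getCacheStatsDirect({}, log); // sketch only
if (stats.success) log.info(stats.data); // { hits, misses, invalidations, size, maxSize, ttl }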

View File

@@ -4,7 +4,10 @@
import { clearSubtasks } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import fs from 'fs';
/**
@@ -18,95 +21,96 @@ import fs from 'fs';
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
*/
export async function clearSubtasksDirect(args, log) {
try {
log.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);
// Either id or all must be provided
if (!args.id && !args.all) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Either task IDs with id parameter or all parameter must be provided'
}
};
}
try {
log.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Check if tasks.json exists
if (!fs.existsSync(tasksPath)) {
return {
success: false,
error: {
code: 'FILE_NOT_FOUND_ERROR',
message: `Tasks file not found at ${tasksPath}`
}
};
}
let taskIds;
// If all is specified, get all task IDs
if (args.all) {
log.info('Clearing subtasks from all tasks');
const data = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
if (!data || !data.tasks || data.tasks.length === 0) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'No valid tasks found in the tasks file'
}
};
}
taskIds = data.tasks.map(t => t.id).join(',');
} else {
// Use the provided task IDs
taskIds = args.id;
}
log.info(`Clearing subtasks from tasks: ${taskIds}`);
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call the core function
clearSubtasks(tasksPath, taskIds);
// Restore normal logging
disableSilentMode();
// Read the updated data to provide a summary
const updatedData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
const taskIdArray = taskIds.split(',').map(id => parseInt(id.trim(), 10));
// Build a summary of what was done
const clearedTasksCount = taskIdArray.length;
const taskSummary = taskIdArray.map(id => {
const task = updatedData.tasks.find(t => t.id === id);
return task ? { id, title: task.title } : { id, title: 'Task not found' };
});
return {
success: true,
data: {
message: `Successfully cleared subtasks from ${clearedTasksCount} task(s)`,
tasksCleared: taskSummary
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in clearSubtasksDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}
// Either id or all must be provided
if (!args.id && !args.all) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message:
'Either task IDs with id parameter or all parameter must be provided'
}
};
}
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Check if tasks.json exists
if (!fs.existsSync(tasksPath)) {
return {
success: false,
error: {
code: 'FILE_NOT_FOUND_ERROR',
message: `Tasks file not found at ${tasksPath}`
}
};
}
let taskIds;
// If all is specified, get all task IDs
if (args.all) {
log.info('Clearing subtasks from all tasks');
const data = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
if (!data || !data.tasks || data.tasks.length === 0) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'No valid tasks found in the tasks file'
}
};
}
taskIds = data.tasks.map((t) => t.id).join(',');
} else {
// Use the provided task IDs
taskIds = args.id;
}
log.info(`Clearing subtasks from tasks: ${taskIds}`);
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call the core function
clearSubtasks(tasksPath, taskIds);
// Restore normal logging
disableSilentMode();
// Read the updated data to provide a summary
const updatedData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
const taskIdArray = taskIds.split(',').map((id) => parseInt(id.trim(), 10));
// Build a summary of what was done
const clearedTasksCount = taskIdArray.length;
const taskSummary = taskIdArray.map((id) => {
const task = updatedData.tasks.find((t) => t.id === id);
return task ? { id, title: task.title } : { id, title: 'Task not found' };
});
return {
success: true,
data: {
message: `Successfully cleared subtasks from ${clearedTasksCount} task(s)`,
tasksCleared: taskSummary
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in clearSubtasksDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}
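An illustrative call clearing subtasks from two specific tasks; pass { all: true } instead to clear every task (the IDs are placeholders):

// Sketch only.
const result = await clearSubtasksDirect({ id: '5,7' }, log);
if (result.success) log.info(result.data.message, result.data.tasksCleared);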

View File

@@ -3,119 +3,131 @@
* Direct function implementation for displaying complexity analysis report
*/
import { readComplexityReport, enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
readComplexityReport,
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import path from 'path';
/**
* Direct function wrapper for displaying the complexity report with error handling and caching.
*
*
* @param {Object} args - Command arguments containing file path option
* @param {Object} log - Logger object
* @returns {Promise<Object>} - Result object with success status and data/error information
*/
export async function complexityReportDirect(args, log) {
try {
log.info(`Getting complexity report with args: ${JSON.stringify(args)}`);
// Get tasks file path to determine project root for the default report location
let tasksPath;
try {
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.warn(`Tasks file not found, using current directory: ${error.message}`);
// Continue with default or specified report path
}
try {
log.info(`Getting complexity report with args: ${JSON.stringify(args)}`);
// Get report file path from args or use default
const reportPath = args.file || path.join(process.cwd(), 'scripts', 'task-complexity-report.json');
log.info(`Looking for complexity report at: ${reportPath}`);
// Generate cache key based on report path
const cacheKey = `complexityReport:${reportPath}`;
// Define the core action function to read the report
const coreActionFn = async () => {
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
const report = readComplexityReport(reportPath);
// Restore normal logging
disableSilentMode();
if (!report) {
log.warn(`No complexity report found at ${reportPath}`);
return {
success: false,
error: {
code: 'FILE_NOT_FOUND_ERROR',
message: `No complexity report found at ${reportPath}. Run 'analyze-complexity' first.`
}
};
}
return {
success: true,
data: {
report,
reportPath
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error reading complexity report: ${error.message}`);
return {
success: false,
error: {
code: 'READ_ERROR',
message: error.message
}
};
}
};
// Get tasks file path to determine project root for the default report location
let tasksPath;
try {
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.warn(
`Tasks file not found, using current directory: ${error.message}`
);
// Continue with default or specified report path
}
// Use the caching utility
try {
const result = await getCachedOrExecute({
cacheKey,
actionFn: coreActionFn,
log
});
log.info(`complexityReportDirect completed. From cache: ${result.fromCache}`);
return result; // Returns { success, data/error, fromCache }
} catch (error) {
// Catch unexpected errors from getCachedOrExecute itself
// Ensure silent mode is disabled
disableSilentMode();
log.error(`Unexpected error during getCachedOrExecute for complexityReport: ${error.message}`);
return {
success: false,
error: {
code: 'UNEXPECTED_ERROR',
message: error.message
},
fromCache: false
};
}
} catch (error) {
// Ensure silent mode is disabled if an outer error occurs
disableSilentMode();
log.error(`Error in complexityReportDirect: ${error.message}`);
return {
success: false,
error: {
code: 'UNEXPECTED_ERROR',
message: error.message
},
fromCache: false
};
}
}
// Get report file path from args or use default
const reportPath =
args.file ||
path.join(process.cwd(), 'scripts', 'task-complexity-report.json');
log.info(`Looking for complexity report at: ${reportPath}`);
// Generate cache key based on report path
const cacheKey = `complexityReport:${reportPath}`;
// Define the core action function to read the report
const coreActionFn = async () => {
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
const report = readComplexityReport(reportPath);
// Restore normal logging
disableSilentMode();
if (!report) {
log.warn(`No complexity report found at ${reportPath}`);
return {
success: false,
error: {
code: 'FILE_NOT_FOUND_ERROR',
message: `No complexity report found at ${reportPath}. Run 'analyze-complexity' first.`
}
};
}
return {
success: true,
data: {
report,
reportPath
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error reading complexity report: ${error.message}`);
return {
success: false,
error: {
code: 'READ_ERROR',
message: error.message
}
};
}
};
// Use the caching utility
try {
const result = await getCachedOrExecute({
cacheKey,
actionFn: coreActionFn,
log
});
log.info(
`complexityReportDirect completed. From cache: ${result.fromCache}`
);
return result; // Returns { success, data/error, fromCache }
} catch (error) {
// Catch unexpected errors from getCachedOrExecute itself
// Ensure silent mode is disabled
disableSilentMode();
log.error(
`Unexpected error during getCachedOrExecute for complexityReport: ${error.message}`
);
return {
success: false,
error: {
code: 'UNEXPECTED_ERROR',
message: error.message
},
fromCache: false
};
}
} catch (error) {
// Ensure silent mode is disabled if an outer error occurs
disableSilentMode();
log.error(`Error in complexityReportDirect: ${error.message}`);
return {
success: false,
error: {
code: 'UNEXPECTED_ERROR',
message: error.message
},
fromCache: false
};
}
}
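Because this wrapper goes through getCachedOrExecute, a second call for the same report path should normally come back from cache; a sketch:

// Illustrative only; assumes the default report path on both calls.
const first = await complexityReportDirect({}, log);
const second = await complexityReportDirect({}, log);
log.info(`fromCache: ${first.fromCache}, then ${second.fromCache}`); // typically false, then true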

View File

@@ -3,7 +3,11 @@
*/
import { expandAllTasks } from '../../../../scripts/modules/task-manager.js';
import { enableSilentMode, disableSilentMode, isSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode,
isSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { getAnthropicClientForMCP } from '../utils/ai-client-utils.js';
import path from 'path';
@@ -23,98 +27,100 @@ import fs from 'fs';
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
*/
export async function expandAllTasksDirect(args, log, context = {}) {
const { session } = context; // Only extract session, not reportProgress
try {
log.info(`Expanding all tasks with args: ${JSON.stringify(args)}`);
// Enable silent mode early to prevent any console output
enableSilentMode();
try {
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Parse parameters
const numSubtasks = args.num ? parseInt(args.num, 10) : undefined;
const useResearch = args.research === true;
const additionalContext = args.prompt || '';
const forceFlag = args.force === true;
log.info(`Expanding all tasks with ${numSubtasks || 'default'} subtasks each...`);
if (useResearch) {
log.info('Using Perplexity AI for research-backed subtask generation');
// Initialize AI client for research-backed expansion
try {
await getAnthropicClientForMCP(session, log);
} catch (error) {
// Ensure silent mode is disabled before returning error
disableSilentMode();
log.error(`Failed to initialize AI client: ${error.message}`);
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
}
};
}
}
if (additionalContext) {
log.info(`Additional context: "${additionalContext}"`);
}
if (forceFlag) {
log.info('Force regeneration of subtasks is enabled');
}
// Call the core function with session context for AI operations
// and outputFormat as 'json' to prevent UI elements
const result = await expandAllTasks(
tasksPath,
numSubtasks,
useResearch,
additionalContext,
forceFlag,
{ mcpLog: log, session },
'json' // Use JSON output format to prevent UI elements
);
// The expandAllTasks function now returns a result object
return {
success: true,
data: {
message: "Successfully expanded all pending tasks with subtasks",
details: {
numSubtasks: numSubtasks,
research: useResearch,
prompt: additionalContext,
force: forceFlag,
tasksExpanded: result.expandedCount,
totalEligibleTasks: result.tasksToExpand
}
}
};
} finally {
// Restore normal logging in finally block to ensure it runs even if there's an error
disableSilentMode();
}
} catch (error) {
// Ensure silent mode is disabled if an error occurs
if (isSilentMode()) {
disableSilentMode();
}
log.error(`Error in expandAllTasksDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}
const { session } = context; // Only extract session, not reportProgress
try {
log.info(`Expanding all tasks with args: ${JSON.stringify(args)}`);
// Enable silent mode early to prevent any console output
enableSilentMode();
try {
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Parse parameters
const numSubtasks = args.num ? parseInt(args.num, 10) : undefined;
const useResearch = args.research === true;
const additionalContext = args.prompt || '';
const forceFlag = args.force === true;
log.info(
`Expanding all tasks with ${numSubtasks || 'default'} subtasks each...`
);
if (useResearch) {
log.info('Using Perplexity AI for research-backed subtask generation');
// Initialize AI client for research-backed expansion
try {
await getAnthropicClientForMCP(session, log);
} catch (error) {
// Ensure silent mode is disabled before returning error
disableSilentMode();
log.error(`Failed to initialize AI client: ${error.message}`);
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
}
};
}
}
if (additionalContext) {
log.info(`Additional context: "${additionalContext}"`);
}
if (forceFlag) {
log.info('Force regeneration of subtasks is enabled');
}
// Call the core function with session context for AI operations
// and outputFormat as 'json' to prevent UI elements
const result = await expandAllTasks(
tasksPath,
numSubtasks,
useResearch,
additionalContext,
forceFlag,
{ mcpLog: log, session },
'json' // Use JSON output format to prevent UI elements
);
// The expandAllTasks function now returns a result object
return {
success: true,
data: {
message: 'Successfully expanded all pending tasks with subtasks',
details: {
numSubtasks: numSubtasks,
research: useResearch,
prompt: additionalContext,
force: forceFlag,
tasksExpanded: result.expandedCount,
totalEligibleTasks: result.tasksToExpand
}
}
};
} finally {
// Restore normal logging in finally block to ensure it runs even if there's an error
disableSilentMode();
}
} catch (error) {
// Ensure silent mode is disabled if an error occurs
if (isSilentMode()) {
disableSilentMode();
}
log.error(`Error in expandAllTasksDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}
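
Every wrapper in this changeset resolves to the same success/error envelope (most also carry a fromCache flag), so an MCP tool handler only needs to branch on one shape. A minimal consumption sketch, assuming expandAllTasksDirect is exported from this module; the import path, logger stub, and missing session are placeholders and not part of this diff, and whether the core expansion succeeds still depends on the project's AI configuration:

// Sketch only: import path, logger, and session are assumptions.
import { expandAllTasksDirect } from './mcp-server/src/core/direct-functions/expand-all-tasks.js';

const log = {
	info: (msg) => console.error(`[info] ${msg}`),
	warn: (msg) => console.error(`[warn] ${msg}`),
	error: (msg) => console.error(`[error] ${msg}`)
};

const result = await expandAllTasksDirect(
	{ num: '3', research: false, prompt: '', force: false },
	log,
	{ session: undefined }
);

if (result.success) {
	log.info(
		`Expanded ${result.data.details.tasksExpanded} of ${result.data.details.totalEligibleTasks} eligible tasks`
	);
} else {
	log.error(`${result.error.code}: ${result.error.message}`);
}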

View File

@@ -4,9 +4,18 @@
*/
import { expandTask } from '../../../../scripts/modules/task-manager.js';
import { readJSON, writeJSON, enableSilentMode, disableSilentMode, isSilentMode } from '../../../../scripts/modules/utils.js';
import {
readJSON,
writeJSON,
enableSilentMode,
disableSilentMode,
isSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { getAnthropicClientForMCP, getModelConfig } from '../utils/ai-client-utils.js';
import {
getAnthropicClientForMCP,
getModelConfig
} from '../utils/ai-client-utils.js';
import path from 'path';
import fs from 'fs';
@@ -19,231 +28,248 @@ import fs from 'fs';
* @returns {Promise<Object>} - Task expansion result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
*/
export async function expandTaskDirect(args, log, context = {}) {
const { session } = context;
// Log session root data for debugging
log.info(`Session data in expandTaskDirect: ${JSON.stringify({
hasSession: !!session,
sessionKeys: session ? Object.keys(session) : [],
roots: session?.roots,
rootsStr: JSON.stringify(session?.roots)
})}`);
let tasksPath;
try {
// If a direct file path is provided, use it directly
if (args.file && fs.existsSync(args.file)) {
log.info(`[expandTaskDirect] Using explicitly provided tasks file: ${args.file}`);
tasksPath = args.file;
} else {
// Find the tasks path through standard logic
log.info(`[expandTaskDirect] No direct file path provided or file not found at ${args.file}, searching using findTasksJsonPath`);
tasksPath = findTasksJsonPath(args, log);
}
} catch (error) {
log.error(`[expandTaskDirect] Error during tasksPath determination: ${error.message}`);
// Include session roots information in error
const sessionRootsInfo = session ?
`\nSession.roots: ${JSON.stringify(session.roots)}\n` +
`Current Working Directory: ${process.cwd()}\n` +
`Args.projectRoot: ${args.projectRoot}\n` +
`Args.file: ${args.file}\n` :
'\nSession object not available';
return {
success: false,
error: {
code: 'FILE_NOT_FOUND_ERROR',
message: `Error determining tasksPath: ${error.message}${sessionRootsInfo}`
},
fromCache: false
};
}
const { session } = context;
log.info(`[expandTaskDirect] Determined tasksPath: ${tasksPath}`);
// Log session root data for debugging
log.info(
`Session data in expandTaskDirect: ${JSON.stringify({
hasSession: !!session,
sessionKeys: session ? Object.keys(session) : [],
roots: session?.roots,
rootsStr: JSON.stringify(session?.roots)
})}`
);
// Validate task ID
const taskId = args.id ? parseInt(args.id, 10) : null;
if (!taskId) {
log.error('Task ID is required');
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Task ID is required'
},
fromCache: false
};
}
let tasksPath;
try {
// If a direct file path is provided, use it directly
if (args.file && fs.existsSync(args.file)) {
log.info(
`[expandTaskDirect] Using explicitly provided tasks file: ${args.file}`
);
tasksPath = args.file;
} else {
// Find the tasks path through standard logic
log.info(
`[expandTaskDirect] No direct file path provided or file not found at ${args.file}, searching using findTasksJsonPath`
);
tasksPath = findTasksJsonPath(args, log);
}
} catch (error) {
log.error(
`[expandTaskDirect] Error during tasksPath determination: ${error.message}`
);
// Process other parameters
const numSubtasks = args.num ? parseInt(args.num, 10) : undefined;
const useResearch = args.research === true;
const additionalContext = args.prompt || '';
// Include session roots information in error
const sessionRootsInfo = session
? `\nSession.roots: ${JSON.stringify(session.roots)}\n` +
`Current Working Directory: ${process.cwd()}\n` +
`Args.projectRoot: ${args.projectRoot}\n` +
`Args.file: ${args.file}\n`
: '\nSession object not available';
// Initialize AI client if needed (for expandTask function)
try {
// This ensures the AI client is available by checking it
if (useResearch) {
log.info('Verifying AI client for research-backed expansion');
await getAnthropicClientForMCP(session, log);
}
} catch (error) {
log.error(`Failed to initialize AI client: ${error.message}`);
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
},
fromCache: false
};
}
return {
success: false,
error: {
code: 'FILE_NOT_FOUND_ERROR',
message: `Error determining tasksPath: ${error.message}${sessionRootsInfo}`
},
fromCache: false
};
}
try {
log.info(`[expandTaskDirect] Expanding task ${taskId} into ${numSubtasks || 'default'} subtasks. Research: ${useResearch}`);
// Read tasks data
log.info(`[expandTaskDirect] Attempting to read JSON from: ${tasksPath}`);
const data = readJSON(tasksPath);
log.info(`[expandTaskDirect] Result of readJSON: ${data ? 'Data read successfully' : 'readJSON returned null or undefined'}`);
log.info(`[expandTaskDirect] Determined tasksPath: ${tasksPath}`);
if (!data || !data.tasks) {
log.error(`[expandTaskDirect] readJSON failed or returned invalid data for path: ${tasksPath}`);
return {
success: false,
error: {
code: 'INVALID_TASKS_FILE',
message: `No valid tasks found in ${tasksPath}. readJSON returned: ${JSON.stringify(data)}`
},
fromCache: false
};
}
// Find the specific task
log.info(`[expandTaskDirect] Searching for task ID ${taskId} in data`);
const task = data.tasks.find(t => t.id === taskId);
log.info(`[expandTaskDirect] Task found: ${task ? 'Yes' : 'No'}`);
if (!task) {
return {
success: false,
error: {
code: 'TASK_NOT_FOUND',
message: `Task with ID ${taskId} not found`
},
fromCache: false
};
}
// Check if task is completed
if (task.status === 'done' || task.status === 'completed') {
return {
success: false,
error: {
code: 'TASK_COMPLETED',
message: `Task ${taskId} is already marked as ${task.status} and cannot be expanded`
},
fromCache: false
};
}
// Check for existing subtasks
const hasExistingSubtasks = task.subtasks && task.subtasks.length > 0;
// If the task already has subtasks, just return it (matching core behavior)
if (hasExistingSubtasks) {
log.info(`Task ${taskId} already has ${task.subtasks.length} subtasks`);
return {
success: true,
data: {
task,
subtasksAdded: 0,
hasExistingSubtasks
},
fromCache: false
};
}
// Keep a copy of the task before modification
const originalTask = JSON.parse(JSON.stringify(task));
// Tracking subtasks count before expansion
const subtasksCountBefore = task.subtasks ? task.subtasks.length : 0;
// Create a backup of the tasks.json file
const backupPath = path.join(path.dirname(tasksPath), 'tasks.json.bak');
fs.copyFileSync(tasksPath, backupPath);
// Directly modify the data instead of calling the CLI function
if (!task.subtasks) {
task.subtasks = [];
}
// Save tasks.json with potentially empty subtasks array
writeJSON(tasksPath, data);
// Process the request
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call expandTask with session context to ensure AI client is properly initialized
const result = await expandTask(
tasksPath,
taskId,
numSubtasks,
useResearch,
additionalContext,
{ mcpLog: log, session } // Only pass mcpLog and session, NOT reportProgress
);
// Restore normal logging
disableSilentMode();
// Read the updated data
const updatedData = readJSON(tasksPath);
const updatedTask = updatedData.tasks.find(t => t.id === taskId);
// Calculate how many subtasks were added
const subtasksAdded = updatedTask.subtasks ?
updatedTask.subtasks.length - subtasksCountBefore : 0;
// Return the result
log.info(`Successfully expanded task ${taskId} with ${subtasksAdded} new subtasks`);
return {
success: true,
data: {
task: updatedTask,
subtasksAdded,
hasExistingSubtasks
},
fromCache: false
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error expanding task: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message || 'Failed to expand task'
},
fromCache: false
};
}
} catch (error) {
log.error(`Error expanding task: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message || 'Failed to expand task'
},
fromCache: false
};
}
}
// Validate task ID
const taskId = args.id ? parseInt(args.id, 10) : null;
if (!taskId) {
log.error('Task ID is required');
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Task ID is required'
},
fromCache: false
};
}
// Process other parameters
const numSubtasks = args.num ? parseInt(args.num, 10) : undefined;
const useResearch = args.research === true;
const additionalContext = args.prompt || '';
// Initialize AI client if needed (for expandTask function)
try {
// This ensures the AI client is available by checking it
if (useResearch) {
log.info('Verifying AI client for research-backed expansion');
await getAnthropicClientForMCP(session, log);
}
} catch (error) {
log.error(`Failed to initialize AI client: ${error.message}`);
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
},
fromCache: false
};
}
try {
log.info(
`[expandTaskDirect] Expanding task ${taskId} into ${numSubtasks || 'default'} subtasks. Research: ${useResearch}`
);
// Read tasks data
log.info(`[expandTaskDirect] Attempting to read JSON from: ${tasksPath}`);
const data = readJSON(tasksPath);
log.info(
`[expandTaskDirect] Result of readJSON: ${data ? 'Data read successfully' : 'readJSON returned null or undefined'}`
);
if (!data || !data.tasks) {
log.error(
`[expandTaskDirect] readJSON failed or returned invalid data for path: ${tasksPath}`
);
return {
success: false,
error: {
code: 'INVALID_TASKS_FILE',
message: `No valid tasks found in ${tasksPath}. readJSON returned: ${JSON.stringify(data)}`
},
fromCache: false
};
}
// Find the specific task
log.info(`[expandTaskDirect] Searching for task ID ${taskId} in data`);
const task = data.tasks.find((t) => t.id === taskId);
log.info(`[expandTaskDirect] Task found: ${task ? 'Yes' : 'No'}`);
if (!task) {
return {
success: false,
error: {
code: 'TASK_NOT_FOUND',
message: `Task with ID ${taskId} not found`
},
fromCache: false
};
}
// Check if task is completed
if (task.status === 'done' || task.status === 'completed') {
return {
success: false,
error: {
code: 'TASK_COMPLETED',
message: `Task ${taskId} is already marked as ${task.status} and cannot be expanded`
},
fromCache: false
};
}
// Check for existing subtasks
const hasExistingSubtasks = task.subtasks && task.subtasks.length > 0;
// If the task already has subtasks, just return it (matching core behavior)
if (hasExistingSubtasks) {
log.info(`Task ${taskId} already has ${task.subtasks.length} subtasks`);
return {
success: true,
data: {
task,
subtasksAdded: 0,
hasExistingSubtasks
},
fromCache: false
};
}
// Keep a copy of the task before modification
const originalTask = JSON.parse(JSON.stringify(task));
// Tracking subtasks count before expansion
const subtasksCountBefore = task.subtasks ? task.subtasks.length : 0;
// Create a backup of the tasks.json file
const backupPath = path.join(path.dirname(tasksPath), 'tasks.json.bak');
fs.copyFileSync(tasksPath, backupPath);
// Directly modify the data instead of calling the CLI function
if (!task.subtasks) {
task.subtasks = [];
}
// Save tasks.json with potentially empty subtasks array
writeJSON(tasksPath, data);
// Process the request
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call expandTask with session context to ensure AI client is properly initialized
const result = await expandTask(
tasksPath,
taskId,
numSubtasks,
useResearch,
additionalContext,
{ mcpLog: log, session } // Only pass mcpLog and session, NOT reportProgress
);
// Restore normal logging
disableSilentMode();
// Read the updated data
const updatedData = readJSON(tasksPath);
const updatedTask = updatedData.tasks.find((t) => t.id === taskId);
// Calculate how many subtasks were added
const subtasksAdded = updatedTask.subtasks
? updatedTask.subtasks.length - subtasksCountBefore
: 0;
// Return the result
log.info(
`Successfully expanded task ${taskId} with ${subtasksAdded} new subtasks`
);
return {
success: true,
data: {
task: updatedTask,
subtasksAdded,
hasExistingSubtasks
},
fromCache: false
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error expanding task: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message || 'Failed to expand task'
},
fromCache: false
};
}
} catch (error) {
log.error(`Error expanding task: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message || 'Failed to expand task'
},
fromCache: false
};
}
}
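
As a rough usage sketch (file path, logger, and session are placeholders, not part of this diff): expandTaskDirect takes an explicit tasks file via args.file or falls back to findTasksJsonPath, and args.id goes through parseInt, so a dotted subtask ID like '7.2' would be truncated to its parent task ID. Whether the core expansion runs to completion still depends on the project's AI configuration.

// Sketch only: paths, logger, and session are assumptions.
import { expandTaskDirect } from './mcp-server/src/core/direct-functions/expand-task.js';

const log = { info: console.error, warn: console.error, error: console.error, debug: console.error };

const { success, data, error } = await expandTaskDirect(
	{ file: './tasks/tasks.json', id: '7', num: 5, research: false, prompt: '' },
	log,
	{ session: undefined }
);

console.error(
	success
		? `Task ${data.task.id}: ${data.subtasksAdded} subtasks added (had existing: ${data.hasExistingSubtasks})`
		: `${error.code}: ${error.message}`
);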

View File

@@ -4,7 +4,10 @@
import { fixDependenciesCommand } from '../../../../scripts/modules/dependency-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import fs from 'fs';
/**
@@ -16,50 +19,50 @@ import fs from 'fs';
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
*/
export async function fixDependenciesDirect(args, log) {
try {
log.info(`Fixing invalid dependencies in tasks...`);
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Verify the file exists
if (!fs.existsSync(tasksPath)) {
return {
success: false,
error: {
code: 'FILE_NOT_FOUND',
message: `Tasks file not found at ${tasksPath}`
}
};
}
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call the original command function
await fixDependenciesCommand(tasksPath);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
message: 'Dependencies fixed successfully',
tasksPath
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error fixing dependencies: ${error.message}`);
return {
success: false,
error: {
code: 'FIX_DEPENDENCIES_ERROR',
message: error.message
}
};
}
}
try {
log.info(`Fixing invalid dependencies in tasks...`);
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Verify the file exists
if (!fs.existsSync(tasksPath)) {
return {
success: false,
error: {
code: 'FILE_NOT_FOUND',
message: `Tasks file not found at ${tasksPath}`
}
};
}
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call the original command function
await fixDependenciesCommand(tasksPath);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
message: 'Dependencies fixed successfully',
tasksPath
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error fixing dependencies: ${error.message}`);
return {
success: false,
error: {
code: 'FIX_DEPENDENCIES_ERROR',
message: error.message
}
};
}
}

View File

@@ -4,84 +4,91 @@
*/
import { generateTaskFiles } from '../../../../scripts/modules/task-manager.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import path from 'path';
/**
* Direct function wrapper for generateTaskFiles with error handling.
*
*
* @param {Object} args - Command arguments containing file and output path options.
* @param {Object} log - Logger object.
* @returns {Promise<Object>} - Result object with success status and data/error information.
*/
export async function generateTaskFilesDirect(args, log) {
try {
log.info(`Generating task files with args: ${JSON.stringify(args)}`);
// Get tasks file path
let tasksPath;
try {
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Error finding tasks file: ${error.message}`);
return {
success: false,
error: { code: 'TASKS_FILE_ERROR', message: error.message },
fromCache: false
};
}
// Get output directory (defaults to the same directory as the tasks file)
let outputDir = args.output;
if (!outputDir) {
outputDir = path.dirname(tasksPath);
}
log.info(`Generating task files from ${tasksPath} to ${outputDir}`);
// Execute core generateTaskFiles function in a separate try/catch
try {
// Enable silent mode to prevent logs from being written to stdout
enableSilentMode();
// The function is synchronous despite being awaited elsewhere
generateTaskFiles(tasksPath, outputDir);
// Restore normal logging after task generation
disableSilentMode();
} catch (genError) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in generateTaskFiles: ${genError.message}`);
return {
success: false,
error: { code: 'GENERATE_FILES_ERROR', message: genError.message },
fromCache: false
};
}
// Return success with file paths
return {
success: true,
data: {
message: `Successfully generated task files`,
tasksPath,
outputDir,
taskFiles: 'Individual task files have been generated in the output directory'
},
fromCache: false // This operation always modifies state and should never be cached
};
} catch (error) {
// Make sure to restore normal logging if an outer error occurs
disableSilentMode();
log.error(`Error generating task files: ${error.message}`);
return {
success: false,
error: { code: 'GENERATE_TASKS_ERROR', message: error.message || 'Unknown error generating task files' },
fromCache: false
};
}
}
try {
log.info(`Generating task files with args: ${JSON.stringify(args)}`);
// Get tasks file path
let tasksPath;
try {
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Error finding tasks file: ${error.message}`);
return {
success: false,
error: { code: 'TASKS_FILE_ERROR', message: error.message },
fromCache: false
};
}
// Get output directory (defaults to the same directory as the tasks file)
let outputDir = args.output;
if (!outputDir) {
outputDir = path.dirname(tasksPath);
}
log.info(`Generating task files from ${tasksPath} to ${outputDir}`);
// Execute core generateTaskFiles function in a separate try/catch
try {
// Enable silent mode to prevent logs from being written to stdout
enableSilentMode();
// The function is synchronous despite being awaited elsewhere
generateTaskFiles(tasksPath, outputDir);
// Restore normal logging after task generation
disableSilentMode();
} catch (genError) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in generateTaskFiles: ${genError.message}`);
return {
success: false,
error: { code: 'GENERATE_FILES_ERROR', message: genError.message },
fromCache: false
};
}
// Return success with file paths
return {
success: true,
data: {
message: `Successfully generated task files`,
tasksPath,
outputDir,
taskFiles:
'Individual task files have been generated in the output directory'
},
fromCache: false // This operation always modifies state and should never be cached
};
} catch (error) {
// Make sure to restore normal logging if an outer error occurs
disableSilentMode();
log.error(`Error generating task files: ${error.message}`);
return {
success: false,
error: {
code: 'GENERATE_TASKS_ERROR',
message: error.message || 'Unknown error generating task files'
},
fromCache: false
};
}
}

View File

@@ -6,7 +6,10 @@
import { listTasks } from '../../../../scripts/modules/task-manager.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
/**
* Direct function wrapper for listTasks with error handling and caching.
@@ -16,68 +19,102 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules
* @returns {Promise<Object>} - Task list result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }.
*/
export async function listTasksDirect(args, log) {
let tasksPath;
try {
// Find the tasks path first - needed for cache key and execution
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
if (error.code === 'TASKS_FILE_NOT_FOUND') {
log.error(`Tasks file not found: ${error.message}`);
// Return the error structure expected by the calling tool/handler
return { success: false, error: { code: error.code, message: error.message }, fromCache: false };
}
log.error(`Unexpected error finding tasks file: ${error.message}`);
// Re-throw for outer catch or return structured error
return { success: false, error: { code: 'FIND_TASKS_PATH_ERROR', message: error.message }, fromCache: false };
}
let tasksPath;
try {
// Find the tasks path first - needed for cache key and execution
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
if (error.code === 'TASKS_FILE_NOT_FOUND') {
log.error(`Tasks file not found: ${error.message}`);
// Return the error structure expected by the calling tool/handler
return {
success: false,
error: { code: error.code, message: error.message },
fromCache: false
};
}
log.error(`Unexpected error finding tasks file: ${error.message}`);
// Re-throw for outer catch or return structured error
return {
success: false,
error: { code: 'FIND_TASKS_PATH_ERROR', message: error.message },
fromCache: false
};
}
// Generate cache key *after* finding tasksPath
const statusFilter = args.status || 'all';
const withSubtasks = args.withSubtasks || false;
const cacheKey = `listTasks:${tasksPath}:${statusFilter}:${withSubtasks}`;
// Define the action function to be executed on cache miss
const coreListTasksAction = async () => {
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
log.info(`Executing core listTasks function for path: ${tasksPath}, filter: ${statusFilter}, subtasks: ${withSubtasks}`);
const resultData = listTasks(tasksPath, statusFilter, withSubtasks, 'json');
// Generate cache key *after* finding tasksPath
const statusFilter = args.status || 'all';
const withSubtasks = args.withSubtasks || false;
const cacheKey = `listTasks:${tasksPath}:${statusFilter}:${withSubtasks}`;
if (!resultData || !resultData.tasks) {
log.error('Invalid or empty response from listTasks core function');
return { success: false, error: { code: 'INVALID_CORE_RESPONSE', message: 'Invalid or empty response from listTasks core function' } };
}
log.info(`Core listTasks function retrieved ${resultData.tasks.length} tasks`);
// Restore normal logging
disableSilentMode();
return { success: true, data: resultData };
// Define the action function to be executed on cache miss
const coreListTasksAction = async () => {
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Core listTasks function failed: ${error.message}`);
return { success: false, error: { code: 'LIST_TASKS_CORE_ERROR', message: error.message || 'Failed to list tasks' } };
}
};
log.info(
`Executing core listTasks function for path: ${tasksPath}, filter: ${statusFilter}, subtasks: ${withSubtasks}`
);
const resultData = listTasks(
tasksPath,
statusFilter,
withSubtasks,
'json'
);
// Use the caching utility
try {
const result = await getCachedOrExecute({
cacheKey,
actionFn: coreListTasksAction,
log
});
log.info(`listTasksDirect completed. From cache: ${result.fromCache}`);
return result; // Returns { success, data/error, fromCache }
} catch(error) {
// Catch unexpected errors from getCachedOrExecute itself (though unlikely)
log.error(`Unexpected error during getCachedOrExecute for listTasks: ${error.message}`);
console.error(error.stack);
return { success: false, error: { code: 'CACHE_UTIL_ERROR', message: error.message }, fromCache: false };
}
}
if (!resultData || !resultData.tasks) {
log.error('Invalid or empty response from listTasks core function');
return {
success: false,
error: {
code: 'INVALID_CORE_RESPONSE',
message: 'Invalid or empty response from listTasks core function'
}
};
}
log.info(
`Core listTasks function retrieved ${resultData.tasks.length} tasks`
);
// Restore normal logging
disableSilentMode();
return { success: true, data: resultData };
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Core listTasks function failed: ${error.message}`);
return {
success: false,
error: {
code: 'LIST_TASKS_CORE_ERROR',
message: error.message || 'Failed to list tasks'
}
};
}
};
// Use the caching utility
try {
const result = await getCachedOrExecute({
cacheKey,
actionFn: coreListTasksAction,
log
});
log.info(`listTasksDirect completed. From cache: ${result.fromCache}`);
return result; // Returns { success, data/error, fromCache }
} catch (error) {
// Catch unexpected errors from getCachedOrExecute itself (though unlikely)
log.error(
`Unexpected error during getCachedOrExecute for listTasks: ${error.message}`
);
console.error(error.stack);
return {
success: false,
error: { code: 'CACHE_UTIL_ERROR', message: error.message },
fromCache: false
};
}
}
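
Because the cache key is built from the resolved tasks path plus the filters (listTasks:<tasksPath>:<statusFilter>:<withSubtasks>), two calls with identical arguments should share one cached payload, assuming getCachedOrExecute caches successful results. A small sketch; the import path and logger stub are placeholders:

// Sketch only: demonstrates the fromCache flag on repeated identical calls.
import { listTasksDirect } from './mcp-server/src/core/direct-functions/list-tasks.js';

const log = { info: console.error, warn: console.error, error: console.error };
const args = { projectRoot: process.cwd(), status: 'pending', withSubtasks: true };

const first = await listTasksDirect(args, log);
const second = await listTasksDirect(args, log);

console.error(`first fromCache=${first.fromCache}, second fromCache=${second.fromCache}`);
// Expected, if caching of successful results is enabled: first=false, second=true.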

View File

@@ -7,7 +7,10 @@ import { findNextTask } from '../../../../scripts/modules/task-manager.js';
import { readJSON } from '../../../../scripts/modules/utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
/**
* Direct function wrapper for finding the next task to work on with error handling and caching.
@@ -17,106 +20,113 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules
* @returns {Promise<Object>} - Next task result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
*/
export async function nextTaskDirect(args, log) {
let tasksPath;
try {
// Find the tasks path first - needed for cache key and execution
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Tasks file not found: ${error.message}`);
return {
success: false,
error: {
code: 'FILE_NOT_FOUND_ERROR',
message: error.message
},
fromCache: false
};
}
let tasksPath;
try {
// Find the tasks path first - needed for cache key and execution
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Tasks file not found: ${error.message}`);
return {
success: false,
error: {
code: 'FILE_NOT_FOUND_ERROR',
message: error.message
},
fromCache: false
};
}
// Generate cache key using task path
const cacheKey = `nextTask:${tasksPath}`;
// Define the action function to be executed on cache miss
const coreNextTaskAction = async () => {
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
log.info(`Finding next task from ${tasksPath}`);
// Read tasks data
const data = readJSON(tasksPath);
if (!data || !data.tasks) {
return {
success: false,
error: {
code: 'INVALID_TASKS_FILE',
message: `No valid tasks found in ${tasksPath}`
}
};
}
// Find the next task
const nextTask = findNextTask(data.tasks);
if (!nextTask) {
log.info('No eligible next task found. All tasks are either completed or have unsatisfied dependencies');
return {
success: true,
data: {
message: 'No eligible next task found. All tasks are either completed or have unsatisfied dependencies',
nextTask: null,
allTasks: data.tasks
}
};
}
// Restore normal logging
disableSilentMode();
// Return the next task data with the full tasks array for reference
log.info(`Successfully found next task ${nextTask.id}: ${nextTask.title}`);
return {
success: true,
data: {
nextTask,
allTasks: data.tasks
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error finding next task: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message || 'Failed to find next task'
}
};
}
};
// Generate cache key using task path
const cacheKey = `nextTask:${tasksPath}`;
// Use the caching utility
try {
const result = await getCachedOrExecute({
cacheKey,
actionFn: coreNextTaskAction,
log
});
log.info(`nextTaskDirect completed. From cache: ${result.fromCache}`);
return result; // Returns { success, data/error, fromCache }
} catch (error) {
// Catch unexpected errors from getCachedOrExecute itself
log.error(`Unexpected error during getCachedOrExecute for nextTask: ${error.message}`);
return {
success: false,
error: {
code: 'UNEXPECTED_ERROR',
message: error.message
},
fromCache: false
};
}
}
// Define the action function to be executed on cache miss
const coreNextTaskAction = async () => {
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
log.info(`Finding next task from ${tasksPath}`);
// Read tasks data
const data = readJSON(tasksPath);
if (!data || !data.tasks) {
return {
success: false,
error: {
code: 'INVALID_TASKS_FILE',
message: `No valid tasks found in ${tasksPath}`
}
};
}
// Find the next task
const nextTask = findNextTask(data.tasks);
if (!nextTask) {
log.info(
'No eligible next task found. All tasks are either completed or have unsatisfied dependencies'
);
return {
success: true,
data: {
message:
'No eligible next task found. All tasks are either completed or have unsatisfied dependencies',
nextTask: null,
allTasks: data.tasks
}
};
}
// Restore normal logging
disableSilentMode();
// Return the next task data with the full tasks array for reference
log.info(
`Successfully found next task ${nextTask.id}: ${nextTask.title}`
);
return {
success: true,
data: {
nextTask,
allTasks: data.tasks
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error finding next task: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message || 'Failed to find next task'
}
};
}
};
// Use the caching utility
try {
const result = await getCachedOrExecute({
cacheKey,
actionFn: coreNextTaskAction,
log
});
log.info(`nextTaskDirect completed. From cache: ${result.fromCache}`);
return result; // Returns { success, data/error, fromCache }
} catch (error) {
// Catch unexpected errors from getCachedOrExecute itself
log.error(
`Unexpected error during getCachedOrExecute for nextTask: ${error.message}`
);
return {
success: false,
error: {
code: 'UNEXPECTED_ERROR',
message: error.message
},
fromCache: false
};
}
}

View File

@@ -7,144 +7,172 @@ import path from 'path';
import fs from 'fs';
import { parsePRD } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import { getAnthropicClientForMCP, getModelConfig } from '../utils/ai-client-utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import {
getAnthropicClientForMCP,
getModelConfig
} from '../utils/ai-client-utils.js';
/**
* Direct function wrapper for parsing PRD documents and generating tasks.
*
*
* @param {Object} args - Command arguments containing input, numTasks or tasks, and output options.
* @param {Object} log - Logger object.
* @param {Object} context - Context object containing session data.
* @returns {Promise<Object>} - Result object with success status and data/error information.
*/
export async function parsePRDDirect(args, log, context = {}) {
const { session } = context; // Only extract session, not reportProgress
try {
log.info(`Parsing PRD document with args: ${JSON.stringify(args)}`);
// Initialize AI client for PRD parsing
let aiClient;
try {
aiClient = getAnthropicClientForMCP(session, log);
} catch (error) {
log.error(`Failed to initialize AI client: ${error.message}`);
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
},
fromCache: false
};
}
// Parameter validation and path resolution
if (!args.input) {
const errorMessage = 'No input file specified. Please provide an input PRD document path.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_INPUT_FILE', message: errorMessage },
fromCache: false
};
}
// Resolve input path (relative to project root if provided)
const projectRoot = args.projectRoot || process.cwd();
const inputPath = path.isAbsolute(args.input) ? args.input : path.resolve(projectRoot, args.input);
// Determine output path
let outputPath;
if (args.output) {
outputPath = path.isAbsolute(args.output) ? args.output : path.resolve(projectRoot, args.output);
} else {
// Default to tasks/tasks.json in the project root
outputPath = path.resolve(projectRoot, 'tasks', 'tasks.json');
}
// Verify input file exists
if (!fs.existsSync(inputPath)) {
const errorMessage = `Input file not found: ${inputPath}`;
log.error(errorMessage);
return {
success: false,
error: { code: 'INPUT_FILE_NOT_FOUND', message: errorMessage },
fromCache: false
};
}
// Parse number of tasks - handle both string and number values
let numTasks = 10; // Default
if (args.numTasks) {
numTasks = typeof args.numTasks === 'string' ? parseInt(args.numTasks, 10) : args.numTasks;
if (isNaN(numTasks)) {
numTasks = 10; // Fallback to default if parsing fails
log.warn(`Invalid numTasks value: ${args.numTasks}. Using default: 10`);
}
}
log.info(`Preparing to parse PRD from ${inputPath} and output to ${outputPath} with ${numTasks} tasks`);
// Create the logger wrapper for proper logging in the core function
const logWrapper = {
info: (message, ...args) => log.info(message, ...args),
warn: (message, ...args) => log.warn(message, ...args),
error: (message, ...args) => log.error(message, ...args),
debug: (message, ...args) => log.debug && log.debug(message, ...args),
success: (message, ...args) => log.info(message, ...args) // Map success to info
};
const { session } = context; // Only extract session, not reportProgress
// Get model config from session
const modelConfig = getModelConfig(session);
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
try {
// Execute core parsePRD function with AI client
await parsePRD(inputPath, outputPath, numTasks, {
mcpLog: logWrapper,
session
}, aiClient, modelConfig);
// Since parsePRD doesn't return a value but writes to a file, we'll read the result
// to return it to the caller
if (fs.existsSync(outputPath)) {
const tasksData = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
log.info(`Successfully parsed PRD and generated ${tasksData.tasks?.length || 0} tasks`);
return {
success: true,
data: {
message: `Successfully generated ${tasksData.tasks?.length || 0} tasks from PRD`,
taskCount: tasksData.tasks?.length || 0,
outputPath
},
fromCache: false // This operation always modifies state and should never be cached
};
} else {
const errorMessage = `Tasks file was not created at ${outputPath}`;
log.error(errorMessage);
return {
success: false,
error: { code: 'OUTPUT_FILE_NOT_CREATED', message: errorMessage },
fromCache: false
};
}
} finally {
// Always restore normal logging
disableSilentMode();
}
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error parsing PRD: ${error.message}`);
return {
success: false,
error: { code: 'PARSE_PRD_ERROR', message: error.message || 'Unknown error parsing PRD' },
fromCache: false
};
}
}
try {
log.info(`Parsing PRD document with args: ${JSON.stringify(args)}`);
// Initialize AI client for PRD parsing
let aiClient;
try {
aiClient = getAnthropicClientForMCP(session, log);
} catch (error) {
log.error(`Failed to initialize AI client: ${error.message}`);
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
},
fromCache: false
};
}
// Parameter validation and path resolution
if (!args.input) {
const errorMessage =
'No input file specified. Please provide an input PRD document path.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_INPUT_FILE', message: errorMessage },
fromCache: false
};
}
// Resolve input path (relative to project root if provided)
const projectRoot = args.projectRoot || process.cwd();
const inputPath = path.isAbsolute(args.input)
? args.input
: path.resolve(projectRoot, args.input);
// Determine output path
let outputPath;
if (args.output) {
outputPath = path.isAbsolute(args.output)
? args.output
: path.resolve(projectRoot, args.output);
} else {
// Default to tasks/tasks.json in the project root
outputPath = path.resolve(projectRoot, 'tasks', 'tasks.json');
}
// Verify input file exists
if (!fs.existsSync(inputPath)) {
const errorMessage = `Input file not found: ${inputPath}`;
log.error(errorMessage);
return {
success: false,
error: { code: 'INPUT_FILE_NOT_FOUND', message: errorMessage },
fromCache: false
};
}
// Parse number of tasks - handle both string and number values
let numTasks = 10; // Default
if (args.numTasks) {
numTasks =
typeof args.numTasks === 'string'
? parseInt(args.numTasks, 10)
: args.numTasks;
if (isNaN(numTasks)) {
numTasks = 10; // Fallback to default if parsing fails
log.warn(`Invalid numTasks value: ${args.numTasks}. Using default: 10`);
}
}
log.info(
`Preparing to parse PRD from ${inputPath} and output to ${outputPath} with ${numTasks} tasks`
);
// Create the logger wrapper for proper logging in the core function
const logWrapper = {
info: (message, ...args) => log.info(message, ...args),
warn: (message, ...args) => log.warn(message, ...args),
error: (message, ...args) => log.error(message, ...args),
debug: (message, ...args) => log.debug && log.debug(message, ...args),
success: (message, ...args) => log.info(message, ...args) // Map success to info
};
// Get model config from session
const modelConfig = getModelConfig(session);
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
try {
// Execute core parsePRD function with AI client
await parsePRD(
inputPath,
outputPath,
numTasks,
{
mcpLog: logWrapper,
session
},
aiClient,
modelConfig
);
// Since parsePRD doesn't return a value but writes to a file, we'll read the result
// to return it to the caller
if (fs.existsSync(outputPath)) {
const tasksData = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
log.info(
`Successfully parsed PRD and generated ${tasksData.tasks?.length || 0} tasks`
);
return {
success: true,
data: {
message: `Successfully generated ${tasksData.tasks?.length || 0} tasks from PRD`,
taskCount: tasksData.tasks?.length || 0,
outputPath
},
fromCache: false // This operation always modifies state and should never be cached
};
} else {
const errorMessage = `Tasks file was not created at ${outputPath}`;
log.error(errorMessage);
return {
success: false,
error: { code: 'OUTPUT_FILE_NOT_CREATED', message: errorMessage },
fromCache: false
};
}
} finally {
// Always restore normal logging
disableSilentMode();
}
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error parsing PRD: ${error.message}`);
return {
success: false,
error: {
code: 'PARSE_PRD_ERROR',
message: error.message || 'Unknown error parsing PRD'
},
fromCache: false
};
}
}
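
The logWrapper above adapts the MCP logger to the interface the core parsePRD expects: it passes info/warn/error through, guards debug (which may be absent), and maps success onto info. The same adapter can be lifted out and reused for other core calls; a standalone sketch, where makeLogWrapper is a hypothetical helper name:

// Sketch of the logger-adapter pattern used above; mcpLog stands in for the MCP tool logger.
function makeLogWrapper(mcpLog) {
	return {
		info: (message, ...rest) => mcpLog.info(message, ...rest),
		warn: (message, ...rest) => mcpLog.warn(message, ...rest),
		error: (message, ...rest) => mcpLog.error(message, ...rest),
		debug: (message, ...rest) => mcpLog.debug && mcpLog.debug(message, ...rest),
		success: (message, ...rest) => mcpLog.info(message, ...rest) // core functions expect a success level
	};
}

// Usage: await parsePRD(inputPath, outputPath, numTasks, { mcpLog: makeLogWrapper(log), session }, aiClient, modelConfig);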

View File

@@ -4,7 +4,10 @@
import { removeDependency } from '../../../../scripts/modules/dependency-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
/**
* Remove a dependency from a task
@@ -17,67 +20,75 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
*/
export async function removeDependencyDirect(args, log) {
try {
log.info(`Removing dependency with args: ${JSON.stringify(args)}`);
// Validate required parameters
if (!args.id) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Task ID (id) is required'
}
};
}
if (!args.dependsOn) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Dependency ID (dependsOn) is required'
}
};
}
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Format IDs for the core function
const taskId = args.id.includes && args.id.includes('.') ? args.id : parseInt(args.id, 10);
const dependencyId = args.dependsOn.includes && args.dependsOn.includes('.') ? args.dependsOn : parseInt(args.dependsOn, 10);
log.info(`Removing dependency: task ${taskId} no longer depends on ${dependencyId}`);
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call the core function
await removeDependency(tasksPath, taskId, dependencyId);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
message: `Successfully removed dependency: Task ${taskId} no longer depends on ${dependencyId}`,
taskId: taskId,
dependencyId: dependencyId
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in removeDependencyDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}
try {
log.info(`Removing dependency with args: ${JSON.stringify(args)}`);
// Validate required parameters
if (!args.id) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Task ID (id) is required'
}
};
}
if (!args.dependsOn) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Dependency ID (dependsOn) is required'
}
};
}
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Format IDs for the core function
const taskId =
args.id.includes && args.id.includes('.')
? args.id
: parseInt(args.id, 10);
const dependencyId =
args.dependsOn.includes && args.dependsOn.includes('.')
? args.dependsOn
: parseInt(args.dependsOn, 10);
log.info(
`Removing dependency: task ${taskId} no longer depends on ${dependencyId}`
);
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call the core function
await removeDependency(tasksPath, taskId, dependencyId);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
message: `Successfully removed dependency: Task ${taskId} no longer depends on ${dependencyId}`,
taskId: taskId,
dependencyId: dependencyId
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error in removeDependencyDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}
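
The ID handling above keeps dotted subtask IDs (for example '5.2') as strings and coerces plain task IDs to integers before handing them to the core removeDependency. Pulled out on its own for clarity, with normalizeId as a hypothetical name:

// Sketch: mirrors the id/dependsOn normalization above.
function normalizeId(id) {
	// Dotted IDs refer to subtasks and stay as strings; everything else becomes a numeric task ID.
	return id.includes && id.includes('.') ? id : parseInt(id, 10);
}

console.error(normalizeId('5.2')); // '5.2' (subtask reference)
console.error(normalizeId('7')); // 7 (top-level task)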

View File

@@ -4,7 +4,10 @@
import { removeSubtask } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
/**
* Remove a subtask from its parent task
@@ -18,78 +21,86 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
*/
export async function removeSubtaskDirect(args, log) {
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
log.info(`Removing subtask with args: ${JSON.stringify(args)}`);
if (!args.id) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Subtask ID is required and must be in format "parentId.subtaskId"'
}
};
}
// Validate subtask ID format
if (!args.id.includes('.')) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: `Invalid subtask ID format: ${args.id}. Expected format: "parentId.subtaskId"`
}
};
}
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Convert convertToTask to a boolean
const convertToTask = args.convert === true;
// Determine if we should generate files
const generateFiles = !args.skipGenerate;
log.info(`Removing subtask ${args.id} (convertToTask: ${convertToTask}, generateFiles: ${generateFiles})`);
const result = await removeSubtask(tasksPath, args.id, convertToTask, generateFiles);
// Restore normal logging
disableSilentMode();
if (convertToTask && result) {
// Return info about the converted task
return {
success: true,
data: {
message: `Subtask ${args.id} successfully converted to task #${result.id}`,
task: result
}
};
} else {
// Return simple success message for deletion
return {
success: true,
data: {
message: `Subtask ${args.id} successfully removed`
}
};
}
} catch (error) {
// Ensure silent mode is disabled even if an outer error occurs
disableSilentMode();
log.error(`Error in removeSubtaskDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}
log.info(`Removing subtask with args: ${JSON.stringify(args)}`);
if (!args.id) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message:
'Subtask ID is required and must be in format "parentId.subtaskId"'
}
};
}
// Validate subtask ID format
if (!args.id.includes('.')) {
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: `Invalid subtask ID format: ${args.id}. Expected format: "parentId.subtaskId"`
}
};
}
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Convert convertToTask to a boolean
const convertToTask = args.convert === true;
// Determine if we should generate files
const generateFiles = !args.skipGenerate;
log.info(
`Removing subtask ${args.id} (convertToTask: ${convertToTask}, generateFiles: ${generateFiles})`
);
const result = await removeSubtask(
tasksPath,
args.id,
convertToTask,
generateFiles
);
// Restore normal logging
disableSilentMode();
if (convertToTask && result) {
// Return info about the converted task
return {
success: true,
data: {
message: `Subtask ${args.id} successfully converted to task #${result.id}`,
task: result
}
};
} else {
// Return simple success message for deletion
return {
success: true,
data: {
message: `Subtask ${args.id} successfully removed`
}
};
}
} catch (error) {
// Ensure silent mode is disabled even if an outer error occurs
disableSilentMode();
log.error(`Error in removeSubtaskDirect: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message
}
};
}
}

View File

@@ -4,7 +4,10 @@
*/
import { removeTask } from '../../../../scripts/modules/task-manager.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
/**
@@ -15,90 +18,90 @@ import { findTasksJsonPath } from '../utils/path-utils.js';
* @returns {Promise<Object>} - Remove task result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: false }
*/
export async function removeTaskDirect(args, log) {
try {
// Find the tasks path first
let tasksPath;
try {
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Tasks file not found: ${error.message}`);
return {
success: false,
error: {
code: 'FILE_NOT_FOUND_ERROR',
message: error.message
},
fromCache: false
};
}
// Validate task ID parameter
const taskId = args.id;
if (!taskId) {
log.error('Task ID is required');
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Task ID is required'
},
fromCache: false
};
}
// Skip confirmation in the direct function since it's handled by the client
log.info(`Removing task with ID: ${taskId} from ${tasksPath}`);
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call the core removeTask function
const result = await removeTask(tasksPath, taskId);
// Restore normal logging
disableSilentMode();
log.info(`Successfully removed task: ${taskId}`);
// Return the result
return {
success: true,
data: {
message: result.message,
taskId: taskId,
tasksPath: tasksPath,
removedTask: result.removedTask
},
fromCache: false
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error removing task: ${error.message}`);
return {
success: false,
error: {
code: error.code || 'REMOVE_TASK_ERROR',
message: error.message || 'Failed to remove task'
},
fromCache: false
};
}
} catch (error) {
// Ensure silent mode is disabled even if an outer error occurs
disableSilentMode();
// Catch any unexpected errors
log.error(`Unexpected error in removeTaskDirect: ${error.message}`);
return {
success: false,
error: {
code: 'UNEXPECTED_ERROR',
message: error.message
},
fromCache: false
};
}
}
try {
// Find the tasks path first
let tasksPath;
try {
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Tasks file not found: ${error.message}`);
return {
success: false,
error: {
code: 'FILE_NOT_FOUND_ERROR',
message: error.message
},
fromCache: false
};
}
// Validate task ID parameter
const taskId = args.id;
if (!taskId) {
log.error('Task ID is required');
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Task ID is required'
},
fromCache: false
};
}
// Skip confirmation in the direct function since it's handled by the client
log.info(`Removing task with ID: ${taskId} from ${tasksPath}`);
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call the core removeTask function
const result = await removeTask(tasksPath, taskId);
// Restore normal logging
disableSilentMode();
log.info(`Successfully removed task: ${taskId}`);
// Return the result
return {
success: true,
data: {
message: result.message,
taskId: taskId,
tasksPath: tasksPath,
removedTask: result.removedTask
},
fromCache: false
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error removing task: ${error.message}`);
return {
success: false,
error: {
code: error.code || 'REMOVE_TASK_ERROR',
message: error.message || 'Failed to remove task'
},
fromCache: false
};
}
} catch (error) {
// Ensure silent mode is disabled even if an outer error occurs
disableSilentMode();
// Catch any unexpected errors
log.error(`Unexpected error in removeTaskDirect: ${error.message}`);
return {
success: false,
error: {
code: 'UNEXPECTED_ERROR',
message: error.message
},
fromCache: false
};
}
}

View File

@@ -5,108 +5,120 @@
import { setTaskStatus } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode, isSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode,
isSilentMode
} from '../../../../scripts/modules/utils.js';
/**
* Direct function wrapper for setTaskStatus with error handling.
*
*
* @param {Object} args - Command arguments containing id, status and file path options.
* @param {Object} log - Logger object.
* @returns {Promise<Object>} - Result object with success status and data/error information.
*/
export async function setTaskStatusDirect(args, log) {
try {
log.info(`Setting task status with args: ${JSON.stringify(args)}`);
// Check required parameters
if (!args.id) {
const errorMessage = 'No task ID specified. Please provide a task ID to update.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_TASK_ID', message: errorMessage },
fromCache: false
};
}
if (!args.status) {
const errorMessage = 'No status specified. Please provide a new status value.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_STATUS', message: errorMessage },
fromCache: false
};
}
// Get tasks file path
let tasksPath;
try {
// The enhanced findTasksJsonPath will now search in parent directories if needed
tasksPath = findTasksJsonPath(args, log);
log.info(`Found tasks file at: ${tasksPath}`);
} catch (error) {
log.error(`Error finding tasks file: ${error.message}`);
return {
success: false,
error: {
code: 'TASKS_FILE_ERROR',
message: `${error.message}\n\nPlease ensure you are in a Task Master project directory or use the --project-root parameter to specify the path to your project.`
},
fromCache: false
};
}
// Execute core setTaskStatus function
const taskId = args.id;
const newStatus = args.status;
log.info(`Setting task ${taskId} status to "${newStatus}"`);
// Call the core function with proper silent mode handling
let result;
enableSilentMode(); // Enable silent mode before calling core function
try {
// Call the core function
await setTaskStatus(tasksPath, taskId, newStatus, { mcpLog: log });
log.info(`Successfully set task ${taskId} status to ${newStatus}`);
// Return success data
result = {
success: true,
data: {
message: `Successfully updated task ${taskId} status to "${newStatus}"`,
taskId,
status: newStatus,
tasksPath
},
fromCache: false // This operation always modifies state and should never be cached
};
} catch (error) {
log.error(`Error setting task status: ${error.message}`);
result = {
success: false,
error: { code: 'SET_STATUS_ERROR', message: error.message || 'Unknown error setting task status' },
fromCache: false
};
} finally {
// ALWAYS restore normal logging in finally block
disableSilentMode();
}
return result;
} catch (error) {
// Ensure silent mode is disabled if there was an uncaught error in the outer try block
if (isSilentMode()) {
disableSilentMode();
}
log.error(`Error setting task status: ${error.message}`);
return {
success: false,
error: { code: 'SET_STATUS_ERROR', message: error.message || 'Unknown error setting task status' },
fromCache: false
};
}
}
try {
log.info(`Setting task status with args: ${JSON.stringify(args)}`);
// Check required parameters
if (!args.id) {
const errorMessage =
'No task ID specified. Please provide a task ID to update.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_TASK_ID', message: errorMessage },
fromCache: false
};
}
if (!args.status) {
const errorMessage =
'No status specified. Please provide a new status value.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_STATUS', message: errorMessage },
fromCache: false
};
}
// Get tasks file path
let tasksPath;
try {
// The enhanced findTasksJsonPath will now search in parent directories if needed
tasksPath = findTasksJsonPath(args, log);
log.info(`Found tasks file at: ${tasksPath}`);
} catch (error) {
log.error(`Error finding tasks file: ${error.message}`);
return {
success: false,
error: {
code: 'TASKS_FILE_ERROR',
message: `${error.message}\n\nPlease ensure you are in a Task Master project directory or use the --project-root parameter to specify the path to your project.`
},
fromCache: false
};
}
// Execute core setTaskStatus function
const taskId = args.id;
const newStatus = args.status;
log.info(`Setting task ${taskId} status to "${newStatus}"`);
// Call the core function with proper silent mode handling
let result;
enableSilentMode(); // Enable silent mode before calling core function
try {
// Call the core function
await setTaskStatus(tasksPath, taskId, newStatus, { mcpLog: log });
log.info(`Successfully set task ${taskId} status to ${newStatus}`);
// Return success data
result = {
success: true,
data: {
message: `Successfully updated task ${taskId} status to "${newStatus}"`,
taskId,
status: newStatus,
tasksPath
},
fromCache: false // This operation always modifies state and should never be cached
};
} catch (error) {
log.error(`Error setting task status: ${error.message}`);
result = {
success: false,
error: {
code: 'SET_STATUS_ERROR',
message: error.message || 'Unknown error setting task status'
},
fromCache: false
};
} finally {
// ALWAYS restore normal logging in finally block
disableSilentMode();
}
return result;
} catch (error) {
// Ensure silent mode is disabled if there was an uncaught error in the outer try block
if (isSilentMode()) {
disableSilentMode();
}
log.error(`Error setting task status: ${error.message}`);
return {
success: false,
error: {
code: 'SET_STATUS_ERROR',
message: error.message || 'Unknown error setting task status'
},
fromCache: false
};
}
}
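Note: every direct function in this diff wraps its core call in the same enable/disable silent-mode pattern. A minimal sketch of that shared shape, assuming enableSilentMode and disableSilentMode simply toggle console output as the comments describe (the wrapper name below is hypothetical):

import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js'; // path as used by the files in this diff

// Hypothetical helper illustrating the shared pattern: suppress console output
// while the core function runs, always restore it, and return the standard
// { success, data | error, fromCache } result shape.
async function runSilently(coreFn, log) {
	enableSilentMode(); // keep console logs out of the JSON response
	try {
		const data = await coreFn();
		return { success: true, data, fromCache: false };
	} catch (error) {
		log.error(`Core function failed: ${error.message}`);
		return {
			success: false,
			error: { code: 'CORE_FUNCTION_ERROR', message: error.message },
			fromCache: false
		};
	} finally {
		disableSilentMode(); // ALWAYS restore normal logging, even on error
	}
}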

View File

@@ -7,7 +7,10 @@ import { findTaskById } from '../../../../scripts/modules/utils.js';
import { readJSON } from '../../../../scripts/modules/utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
/**
* Direct function wrapper for showing task details with error handling and caching.
@@ -17,120 +20,122 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
* @returns {Promise<Object>} - Task details result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
*/
export async function showTaskDirect(args, log) {
let tasksPath;
try {
// Find the tasks path first - needed for cache key and execution
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Tasks file not found: ${error.message}`);
return {
success: false,
error: {
code: 'FILE_NOT_FOUND_ERROR',
message: error.message
},
fromCache: false
};
}
let tasksPath;
try {
// Find the tasks path first - needed for cache key and execution
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Tasks file not found: ${error.message}`);
return {
success: false,
error: {
code: 'FILE_NOT_FOUND_ERROR',
message: error.message
},
fromCache: false
};
}
// Validate task ID
const taskId = args.id;
if (!taskId) {
log.error('Task ID is required');
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Task ID is required'
},
fromCache: false
};
}
// Validate task ID
const taskId = args.id;
if (!taskId) {
log.error('Task ID is required');
return {
success: false,
error: {
code: 'INPUT_VALIDATION_ERROR',
message: 'Task ID is required'
},
fromCache: false
};
}
// Generate cache key using task path and ID
const cacheKey = `showTask:${tasksPath}:${taskId}`;
// Define the action function to be executed on cache miss
const coreShowTaskAction = async () => {
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
log.info(`Retrieving task details for ID: ${taskId} from ${tasksPath}`);
// Read tasks data
const data = readJSON(tasksPath);
if (!data || !data.tasks) {
return {
success: false,
error: {
code: 'INVALID_TASKS_FILE',
message: `No valid tasks found in ${tasksPath}`
}
};
}
// Find the specific task
const task = findTaskById(data.tasks, taskId);
if (!task) {
return {
success: false,
error: {
code: 'TASK_NOT_FOUND',
message: `Task with ID ${taskId} not found`
}
};
}
// Restore normal logging
disableSilentMode();
// Return the task data with the full tasks array for reference
// (needed for formatDependenciesWithStatus function in UI)
log.info(`Successfully found task ${taskId}`);
return {
success: true,
data: {
task,
allTasks: data.tasks
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error showing task: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message || 'Failed to show task details'
}
};
}
};
// Generate cache key using task path and ID
const cacheKey = `showTask:${tasksPath}:${taskId}`;
// Use the caching utility
try {
const result = await getCachedOrExecute({
cacheKey,
actionFn: coreShowTaskAction,
log
});
log.info(`showTaskDirect completed. From cache: ${result.fromCache}`);
return result; // Returns { success, data/error, fromCache }
} catch (error) {
// Catch unexpected errors from getCachedOrExecute itself
disableSilentMode();
log.error(`Unexpected error during getCachedOrExecute for showTask: ${error.message}`);
return {
success: false,
error: {
code: 'UNEXPECTED_ERROR',
message: error.message
},
fromCache: false
};
}
}
// Define the action function to be executed on cache miss
const coreShowTaskAction = async () => {
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
log.info(`Retrieving task details for ID: ${taskId} from ${tasksPath}`);
// Read tasks data
const data = readJSON(tasksPath);
if (!data || !data.tasks) {
return {
success: false,
error: {
code: 'INVALID_TASKS_FILE',
message: `No valid tasks found in ${tasksPath}`
}
};
}
// Find the specific task
const task = findTaskById(data.tasks, taskId);
if (!task) {
return {
success: false,
error: {
code: 'TASK_NOT_FOUND',
message: `Task with ID ${taskId} not found`
}
};
}
// Restore normal logging
disableSilentMode();
// Return the task data with the full tasks array for reference
// (needed for formatDependenciesWithStatus function in UI)
log.info(`Successfully found task ${taskId}`);
return {
success: true,
data: {
task,
allTasks: data.tasks
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error showing task: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
message: error.message || 'Failed to show task details'
}
};
}
};
// Use the caching utility
try {
const result = await getCachedOrExecute({
cacheKey,
actionFn: coreShowTaskAction,
log
});
log.info(`showTaskDirect completed. From cache: ${result.fromCache}`);
return result; // Returns { success, data/error, fromCache }
} catch (error) {
// Catch unexpected errors from getCachedOrExecute itself
disableSilentMode();
log.error(
`Unexpected error during getCachedOrExecute for showTask: ${error.message}`
);
return {
success: false,
error: {
code: 'UNEXPECTED_ERROR',
message: error.message
},
fromCache: false
};
}
}
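getCachedOrExecute itself is not part of this diff; a rough sketch of what a cache-or-execute helper with this call signature could look like (the real implementation in the repository may differ):

// Hypothetical sketch only; the actual getCachedOrExecute may behave differently.
const cache = new Map();

async function getCachedOrExecute({ cacheKey, actionFn, log }) {
	if (cache.has(cacheKey)) {
		log.info(`Cache hit for ${cacheKey}`);
		return { ...cache.get(cacheKey), fromCache: true };
	}
	log.info(`Cache miss for ${cacheKey}, executing action`);
	const result = await actionFn();
	if (result.success) {
		cache.set(cacheKey, result); // only successful results are worth caching
	}
	return { ...result, fromCache: false };
}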

View File

@@ -4,167 +4,190 @@
*/
import { updateSubtaskById } from '../../../../scripts/modules/task-manager.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { getAnthropicClientForMCP, getPerplexityClientForMCP } from '../utils/ai-client-utils.js';
import {
getAnthropicClientForMCP,
getPerplexityClientForMCP
} from '../utils/ai-client-utils.js';
/**
* Direct function wrapper for updateSubtaskById with error handling.
*
*
* @param {Object} args - Command arguments containing id, prompt, useResearch and file path options.
* @param {Object} log - Logger object.
* @param {Object} context - Context object containing session data.
* @returns {Promise<Object>} - Result object with success status and data/error information.
*/
export async function updateSubtaskByIdDirect(args, log, context = {}) {
const { session } = context; // Only extract session, not reportProgress
try {
log.info(`Updating subtask with args: ${JSON.stringify(args)}`);
// Check required parameters
if (!args.id) {
const errorMessage = 'No subtask ID specified. Please provide a subtask ID to update.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_SUBTASK_ID', message: errorMessage },
fromCache: false
};
}
if (!args.prompt) {
const errorMessage = 'No prompt specified. Please provide a prompt with information to add to the subtask.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_PROMPT', message: errorMessage },
fromCache: false
};
}
// Validate subtask ID format
const subtaskId = args.id;
if (typeof subtaskId !== 'string' && typeof subtaskId !== 'number') {
const errorMessage = `Invalid subtask ID type: ${typeof subtaskId}. Subtask ID must be a string or number.`;
log.error(errorMessage);
return {
success: false,
error: { code: 'INVALID_SUBTASK_ID_TYPE', message: errorMessage },
fromCache: false
};
}
const subtaskIdStr = String(subtaskId);
if (!subtaskIdStr.includes('.')) {
const errorMessage = `Invalid subtask ID format: ${subtaskIdStr}. Subtask ID must be in format "parentId.subtaskId" (e.g., "5.2").`;
log.error(errorMessage);
return {
success: false,
error: { code: 'INVALID_SUBTASK_ID_FORMAT', message: errorMessage },
fromCache: false
};
}
// Get tasks file path
let tasksPath;
try {
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Error finding tasks file: ${error.message}`);
return {
success: false,
error: { code: 'TASKS_FILE_ERROR', message: error.message },
fromCache: false
};
}
// Get research flag
const useResearch = args.research === true;
log.info(`Updating subtask with ID ${subtaskIdStr} with prompt "${args.prompt}" and research: ${useResearch}`);
// Initialize the appropriate AI client based on research flag
try {
if (useResearch) {
// Initialize Perplexity client
await getPerplexityClientForMCP(session);
} else {
// Initialize Anthropic client
await getAnthropicClientForMCP(session);
}
} catch (error) {
log.error(`AI client initialization error: ${error.message}`);
return {
success: false,
error: { code: 'AI_CLIENT_ERROR', message: error.message || 'Failed to initialize AI client' },
fromCache: false
};
}
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Create a logger wrapper object to handle logging without breaking the mcpLog[level] calls
// This ensures outputFormat is set to 'json' while still supporting proper logging
const logWrapper = {
info: (message) => log.info(message),
warn: (message) => log.warn(message),
error: (message) => log.error(message),
debug: (message) => log.debug && log.debug(message),
success: (message) => log.info(message) // Map success to info if needed
};
// Execute core updateSubtaskById function
// Pass both session and logWrapper as mcpLog to ensure outputFormat is 'json'
const updatedSubtask = await updateSubtaskById(tasksPath, subtaskIdStr, args.prompt, useResearch, {
session,
mcpLog: logWrapper
});
// Restore normal logging
disableSilentMode();
// Handle the case where the subtask couldn't be updated (e.g., already marked as done)
if (!updatedSubtask) {
return {
success: false,
error: {
code: 'SUBTASK_UPDATE_FAILED',
message: 'Failed to update subtask. It may be marked as completed, or another error occurred.'
},
fromCache: false
};
}
// Return the updated subtask information
return {
success: true,
data: {
message: `Successfully updated subtask with ID ${subtaskIdStr}`,
subtaskId: subtaskIdStr,
parentId: subtaskIdStr.split('.')[0],
subtask: updatedSubtask,
tasksPath,
useResearch
},
fromCache: false // This operation always modifies state and should never be cached
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
throw error; // Rethrow to be caught by outer catch block
}
} catch (error) {
// Ensure silent mode is disabled
disableSilentMode();
log.error(`Error updating subtask by ID: ${error.message}`);
return {
success: false,
error: { code: 'UPDATE_SUBTASK_ERROR', message: error.message || 'Unknown error updating subtask' },
fromCache: false
};
}
}
const { session } = context; // Only extract session, not reportProgress
try {
log.info(`Updating subtask with args: ${JSON.stringify(args)}`);
// Check required parameters
if (!args.id) {
const errorMessage =
'No subtask ID specified. Please provide a subtask ID to update.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_SUBTASK_ID', message: errorMessage },
fromCache: false
};
}
if (!args.prompt) {
const errorMessage =
'No prompt specified. Please provide a prompt with information to add to the subtask.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_PROMPT', message: errorMessage },
fromCache: false
};
}
// Validate subtask ID format
const subtaskId = args.id;
if (typeof subtaskId !== 'string' && typeof subtaskId !== 'number') {
const errorMessage = `Invalid subtask ID type: ${typeof subtaskId}. Subtask ID must be a string or number.`;
log.error(errorMessage);
return {
success: false,
error: { code: 'INVALID_SUBTASK_ID_TYPE', message: errorMessage },
fromCache: false
};
}
const subtaskIdStr = String(subtaskId);
if (!subtaskIdStr.includes('.')) {
const errorMessage = `Invalid subtask ID format: ${subtaskIdStr}. Subtask ID must be in format "parentId.subtaskId" (e.g., "5.2").`;
log.error(errorMessage);
return {
success: false,
error: { code: 'INVALID_SUBTASK_ID_FORMAT', message: errorMessage },
fromCache: false
};
}
// Get tasks file path
let tasksPath;
try {
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Error finding tasks file: ${error.message}`);
return {
success: false,
error: { code: 'TASKS_FILE_ERROR', message: error.message },
fromCache: false
};
}
// Get research flag
const useResearch = args.research === true;
log.info(
`Updating subtask with ID ${subtaskIdStr} with prompt "${args.prompt}" and research: ${useResearch}`
);
// Initialize the appropriate AI client based on research flag
try {
if (useResearch) {
// Initialize Perplexity client
await getPerplexityClientForMCP(session);
} else {
// Initialize Anthropic client
await getAnthropicClientForMCP(session);
}
} catch (error) {
log.error(`AI client initialization error: ${error.message}`);
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: error.message || 'Failed to initialize AI client'
},
fromCache: false
};
}
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Create a logger wrapper object to handle logging without breaking the mcpLog[level] calls
// This ensures outputFormat is set to 'json' while still supporting proper logging
const logWrapper = {
info: (message) => log.info(message),
warn: (message) => log.warn(message),
error: (message) => log.error(message),
debug: (message) => log.debug && log.debug(message),
success: (message) => log.info(message) // Map success to info if needed
};
// Execute core updateSubtaskById function
// Pass both session and logWrapper as mcpLog to ensure outputFormat is 'json'
const updatedSubtask = await updateSubtaskById(
tasksPath,
subtaskIdStr,
args.prompt,
useResearch,
{
session,
mcpLog: logWrapper
}
);
// Restore normal logging
disableSilentMode();
// Handle the case where the subtask couldn't be updated (e.g., already marked as done)
if (!updatedSubtask) {
return {
success: false,
error: {
code: 'SUBTASK_UPDATE_FAILED',
message:
'Failed to update subtask. It may be marked as completed, or another error occurred.'
},
fromCache: false
};
}
// Return the updated subtask information
return {
success: true,
data: {
message: `Successfully updated subtask with ID ${subtaskIdStr}`,
subtaskId: subtaskIdStr,
parentId: subtaskIdStr.split('.')[0],
subtask: updatedSubtask,
tasksPath,
useResearch
},
fromCache: false // This operation always modifies state and should never be cached
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
throw error; // Rethrow to be caught by outer catch block
}
} catch (error) {
// Ensure silent mode is disabled
disableSilentMode();
log.error(`Error updating subtask by ID: ${error.message}`);
return {
success: false,
error: {
code: 'UPDATE_SUBTASK_ERROR',
message: error.message || 'Unknown error updating subtask'
},
fromCache: false
};
}
}
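A hypothetical invocation of updateSubtaskByIdDirect, assuming it is imported from the module above and that the logger exposes the usual info/warn/error/debug methods:

// Hypothetical caller; the argument shape mirrors the validation above.
const log = {
	info: (m) => console.log(m),
	warn: (m) => console.warn(m),
	error: (m) => console.error(m),
	debug: (m) => console.debug(m)
};

const result = await updateSubtaskByIdDirect(
	{ id: '5.2', prompt: 'Note: the upstream API now paginates results', research: false },
	log,
	{ session: { env: process.env } }
);

if (!result.success) {
	log.error(`${result.error.code}: ${result.error.message}`);
}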

View File

@@ -5,168 +5,181 @@
import { updateTaskById } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
getAnthropicClientForMCP,
getPerplexityClientForMCP
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import {
getAnthropicClientForMCP,
getPerplexityClientForMCP
} from '../utils/ai-client-utils.js';
/**
* Direct function wrapper for updateTaskById with error handling.
*
*
* @param {Object} args - Command arguments containing id, prompt, useResearch and file path options.
* @param {Object} log - Logger object.
* @param {Object} context - Context object containing session data.
* @returns {Promise<Object>} - Result object with success status and data/error information.
*/
export async function updateTaskByIdDirect(args, log, context = {}) {
const { session } = context; // Only extract session, not reportProgress
try {
log.info(`Updating task with args: ${JSON.stringify(args)}`);
// Check required parameters
if (!args.id) {
const errorMessage = 'No task ID specified. Please provide a task ID to update.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_TASK_ID', message: errorMessage },
fromCache: false
};
}
if (!args.prompt) {
const errorMessage = 'No prompt specified. Please provide a prompt with new information for the task update.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_PROMPT', message: errorMessage },
fromCache: false
};
}
// Parse taskId - handle both string and number values
let taskId;
if (typeof args.id === 'string') {
// Handle subtask IDs (e.g., "5.2")
if (args.id.includes('.')) {
taskId = args.id; // Keep as string for subtask IDs
} else {
// Parse as integer for main task IDs
taskId = parseInt(args.id, 10);
if (isNaN(taskId)) {
const errorMessage = `Invalid task ID: ${args.id}. Task ID must be a positive integer or subtask ID (e.g., "5.2").`;
log.error(errorMessage);
return {
success: false,
error: { code: 'INVALID_TASK_ID', message: errorMessage },
fromCache: false
};
}
}
} else {
taskId = args.id;
}
// Get tasks file path
let tasksPath;
try {
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Error finding tasks file: ${error.message}`);
return {
success: false,
error: { code: 'TASKS_FILE_ERROR', message: error.message },
fromCache: false
};
}
// Get research flag
const useResearch = args.research === true;
// Initialize appropriate AI client based on research flag
let aiClient;
try {
if (useResearch) {
log.info('Using Perplexity AI for research-backed task update');
aiClient = await getPerplexityClientForMCP(session, log);
} else {
log.info('Using Claude AI for task update');
aiClient = getAnthropicClientForMCP(session, log);
}
} catch (error) {
log.error(`Failed to initialize AI client: ${error.message}`);
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
},
fromCache: false
};
}
log.info(`Updating task with ID ${taskId} with prompt "${args.prompt}" and research: ${useResearch}`);
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Create a logger wrapper that matches what updateTaskById expects
const logWrapper = {
info: (message) => log.info(message),
warn: (message) => log.warn(message),
error: (message) => log.error(message),
debug: (message) => log.debug && log.debug(message),
success: (message) => log.info(message) // Map success to info since many loggers don't have success
};
// Execute core updateTaskById function with proper parameters
await updateTaskById(
tasksPath,
taskId,
args.prompt,
useResearch,
{
mcpLog: logWrapper, // Use our wrapper object that has the expected method structure
session
},
'json'
);
// Since updateTaskById doesn't return a value but modifies the tasks file,
// we'll return a success message
return {
success: true,
data: {
message: `Successfully updated task with ID ${taskId} based on the prompt`,
taskId,
tasksPath,
useResearch
},
fromCache: false // This operation always modifies state and should never be cached
};
} catch (error) {
log.error(`Error updating task by ID: ${error.message}`);
return {
success: false,
error: { code: 'UPDATE_TASK_ERROR', message: error.message || 'Unknown error updating task' },
fromCache: false
};
} finally {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
}
} catch (error) {
// Ensure silent mode is disabled
disableSilentMode();
log.error(`Error updating task by ID: ${error.message}`);
return {
success: false,
error: { code: 'UPDATE_TASK_ERROR', message: error.message || 'Unknown error updating task' },
fromCache: false
};
}
}
const { session } = context; // Only extract session, not reportProgress
try {
log.info(`Updating task with args: ${JSON.stringify(args)}`);
// Check required parameters
if (!args.id) {
const errorMessage =
'No task ID specified. Please provide a task ID to update.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_TASK_ID', message: errorMessage },
fromCache: false
};
}
if (!args.prompt) {
const errorMessage =
'No prompt specified. Please provide a prompt with new information for the task update.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_PROMPT', message: errorMessage },
fromCache: false
};
}
// Parse taskId - handle both string and number values
let taskId;
if (typeof args.id === 'string') {
// Handle subtask IDs (e.g., "5.2")
if (args.id.includes('.')) {
taskId = args.id; // Keep as string for subtask IDs
} else {
// Parse as integer for main task IDs
taskId = parseInt(args.id, 10);
if (isNaN(taskId)) {
const errorMessage = `Invalid task ID: ${args.id}. Task ID must be a positive integer or subtask ID (e.g., "5.2").`;
log.error(errorMessage);
return {
success: false,
error: { code: 'INVALID_TASK_ID', message: errorMessage },
fromCache: false
};
}
}
} else {
taskId = args.id;
}
// Get tasks file path
let tasksPath;
try {
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Error finding tasks file: ${error.message}`);
return {
success: false,
error: { code: 'TASKS_FILE_ERROR', message: error.message },
fromCache: false
};
}
// Get research flag
const useResearch = args.research === true;
// Initialize appropriate AI client based on research flag
let aiClient;
try {
if (useResearch) {
log.info('Using Perplexity AI for research-backed task update');
aiClient = await getPerplexityClientForMCP(session, log);
} else {
log.info('Using Claude AI for task update');
aiClient = getAnthropicClientForMCP(session, log);
}
} catch (error) {
log.error(`Failed to initialize AI client: ${error.message}`);
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
},
fromCache: false
};
}
log.info(
`Updating task with ID ${taskId} with prompt "${args.prompt}" and research: ${useResearch}`
);
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Create a logger wrapper that matches what updateTaskById expects
const logWrapper = {
info: (message) => log.info(message),
warn: (message) => log.warn(message),
error: (message) => log.error(message),
debug: (message) => log.debug && log.debug(message),
success: (message) => log.info(message) // Map success to info since many loggers don't have success
};
// Execute core updateTaskById function with proper parameters
await updateTaskById(
tasksPath,
taskId,
args.prompt,
useResearch,
{
mcpLog: logWrapper, // Use our wrapper object that has the expected method structure
session
},
'json'
);
// Since updateTaskById doesn't return a value but modifies the tasks file,
// we'll return a success message
return {
success: true,
data: {
message: `Successfully updated task with ID ${taskId} based on the prompt`,
taskId,
tasksPath,
useResearch
},
fromCache: false // This operation always modifies state and should never be cached
};
} catch (error) {
log.error(`Error updating task by ID: ${error.message}`);
return {
success: false,
error: {
code: 'UPDATE_TASK_ERROR',
message: error.message || 'Unknown error updating task'
},
fromCache: false
};
} finally {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
}
} catch (error) {
// Ensure silent mode is disabled
disableSilentMode();
log.error(`Error updating task by ID: ${error.message}`);
return {
success: false,
error: {
code: 'UPDATE_TASK_ERROR',
message: error.message || 'Unknown error updating task'
},
fromCache: false
};
}
}

View File

@@ -4,168 +4,177 @@
*/
import { updateTasks } from '../../../../scripts/modules/task-manager.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
getAnthropicClientForMCP,
getPerplexityClientForMCP
import {
getAnthropicClientForMCP,
getPerplexityClientForMCP
} from '../utils/ai-client-utils.js';
/**
* Direct function wrapper for updating tasks based on new context/prompt.
*
*
* @param {Object} args - Command arguments containing fromId, prompt, useResearch and file path options.
* @param {Object} log - Logger object.
* @param {Object} context - Context object containing session data.
* @returns {Promise<Object>} - Result object with success status and data/error information.
*/
export async function updateTasksDirect(args, log, context = {}) {
const { session } = context; // Only extract session, not reportProgress
try {
log.info(`Updating tasks with args: ${JSON.stringify(args)}`);
// Check for the common mistake of using 'id' instead of 'from'
if (args.id !== undefined && args.from === undefined) {
const errorMessage = "You specified 'id' parameter but 'update' requires 'from' parameter. Use 'from' for this tool or use 'update_task' tool if you want to update a single task.";
log.error(errorMessage);
return {
success: false,
error: {
code: 'PARAMETER_MISMATCH',
message: errorMessage,
suggestion: "Use 'from' parameter instead of 'id', or use the 'update_task' tool for single task updates"
},
fromCache: false
};
}
// Check required parameters
if (!args.from) {
const errorMessage = 'No from ID specified. Please provide a task ID to start updating from.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_FROM_ID', message: errorMessage },
fromCache: false
};
}
if (!args.prompt) {
const errorMessage = 'No prompt specified. Please provide a prompt with new context for task updates.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_PROMPT', message: errorMessage },
fromCache: false
};
}
// Parse fromId - handle both string and number values
let fromId;
if (typeof args.from === 'string') {
fromId = parseInt(args.from, 10);
if (isNaN(fromId)) {
const errorMessage = `Invalid from ID: ${args.from}. Task ID must be a positive integer.`;
log.error(errorMessage);
return {
success: false,
error: { code: 'INVALID_FROM_ID', message: errorMessage },
fromCache: false
};
}
} else {
fromId = args.from;
}
// Get tasks file path
let tasksPath;
try {
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Error finding tasks file: ${error.message}`);
return {
success: false,
error: { code: 'TASKS_FILE_ERROR', message: error.message },
fromCache: false
};
}
// Get research flag
const useResearch = args.research === true;
// Initialize appropriate AI client based on research flag
let aiClient;
try {
if (useResearch) {
log.info('Using Perplexity AI for research-backed task updates');
aiClient = await getPerplexityClientForMCP(session, log);
} else {
log.info('Using Claude AI for task updates');
aiClient = getAnthropicClientForMCP(session, log);
}
} catch (error) {
log.error(`Failed to initialize AI client: ${error.message}`);
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
},
fromCache: false
};
}
log.info(`Updating tasks from ID ${fromId} with prompt "${args.prompt}" and research: ${useResearch}`);
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Execute core updateTasks function, passing the AI client and session
await updateTasks(
tasksPath,
fromId,
args.prompt,
useResearch,
{
mcpLog: log,
session
}
);
// Since updateTasks doesn't return a value but modifies the tasks file,
// we'll return a success message
return {
success: true,
data: {
message: `Successfully updated tasks from ID ${fromId} based on the prompt`,
fromId,
tasksPath,
useResearch
},
fromCache: false // This operation always modifies state and should never be cached
};
} catch (error) {
log.error(`Error updating tasks: ${error.message}`);
return {
success: false,
error: { code: 'UPDATE_TASKS_ERROR', message: error.message || 'Unknown error updating tasks' },
fromCache: false
};
} finally {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
}
} catch (error) {
// Ensure silent mode is disabled
disableSilentMode();
log.error(`Error updating tasks: ${error.message}`);
return {
success: false,
error: { code: 'UPDATE_TASKS_ERROR', message: error.message || 'Unknown error updating tasks' },
fromCache: false
};
}
}
const { session } = context; // Only extract session, not reportProgress
try {
log.info(`Updating tasks with args: ${JSON.stringify(args)}`);
// Check for the common mistake of using 'id' instead of 'from'
if (args.id !== undefined && args.from === undefined) {
const errorMessage =
"You specified 'id' parameter but 'update' requires 'from' parameter. Use 'from' for this tool or use 'update_task' tool if you want to update a single task.";
log.error(errorMessage);
return {
success: false,
error: {
code: 'PARAMETER_MISMATCH',
message: errorMessage,
suggestion:
"Use 'from' parameter instead of 'id', or use the 'update_task' tool for single task updates"
},
fromCache: false
};
}
// Check required parameters
if (!args.from) {
const errorMessage =
'No from ID specified. Please provide a task ID to start updating from.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_FROM_ID', message: errorMessage },
fromCache: false
};
}
if (!args.prompt) {
const errorMessage =
'No prompt specified. Please provide a prompt with new context for task updates.';
log.error(errorMessage);
return {
success: false,
error: { code: 'MISSING_PROMPT', message: errorMessage },
fromCache: false
};
}
// Parse fromId - handle both string and number values
let fromId;
if (typeof args.from === 'string') {
fromId = parseInt(args.from, 10);
if (isNaN(fromId)) {
const errorMessage = `Invalid from ID: ${args.from}. Task ID must be a positive integer.`;
log.error(errorMessage);
return {
success: false,
error: { code: 'INVALID_FROM_ID', message: errorMessage },
fromCache: false
};
}
} else {
fromId = args.from;
}
// Get tasks file path
let tasksPath;
try {
tasksPath = findTasksJsonPath(args, log);
} catch (error) {
log.error(`Error finding tasks file: ${error.message}`);
return {
success: false,
error: { code: 'TASKS_FILE_ERROR', message: error.message },
fromCache: false
};
}
// Get research flag
const useResearch = args.research === true;
// Initialize appropriate AI client based on research flag
let aiClient;
try {
if (useResearch) {
log.info('Using Perplexity AI for research-backed task updates');
aiClient = await getPerplexityClientForMCP(session, log);
} else {
log.info('Using Claude AI for task updates');
aiClient = getAnthropicClientForMCP(session, log);
}
} catch (error) {
log.error(`Failed to initialize AI client: ${error.message}`);
return {
success: false,
error: {
code: 'AI_CLIENT_ERROR',
message: `Cannot initialize AI client: ${error.message}`
},
fromCache: false
};
}
log.info(
`Updating tasks from ID ${fromId} with prompt "${args.prompt}" and research: ${useResearch}`
);
try {
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Execute core updateTasks function, passing the AI client and session
await updateTasks(tasksPath, fromId, args.prompt, useResearch, {
mcpLog: log,
session
});
// Since updateTasks doesn't return a value but modifies the tasks file,
// we'll return a success message
return {
success: true,
data: {
message: `Successfully updated tasks from ID ${fromId} based on the prompt`,
fromId,
tasksPath,
useResearch
},
fromCache: false // This operation always modifies state and should never be cached
};
} catch (error) {
log.error(`Error updating tasks: ${error.message}`);
return {
success: false,
error: {
code: 'UPDATE_TASKS_ERROR',
message: error.message || 'Unknown error updating tasks'
},
fromCache: false
};
} finally {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
}
} catch (error) {
// Ensure silent mode is disabled
disableSilentMode();
log.error(`Error updating tasks: ${error.message}`);
return {
success: false,
error: {
code: 'UPDATE_TASKS_ERROR',
message: error.message || 'Unknown error updating tasks'
},
fromCache: false
};
}
}

View File

@@ -4,7 +4,10 @@
import { validateDependenciesCommand } from '../../../../scripts/modules/dependency-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import fs from 'fs';
/**
@@ -16,50 +19,50 @@ import fs from 'fs';
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
*/
export async function validateDependenciesDirect(args, log) {
try {
log.info(`Validating dependencies in tasks...`);
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Verify the file exists
if (!fs.existsSync(tasksPath)) {
return {
success: false,
error: {
code: 'FILE_NOT_FOUND',
message: `Tasks file not found at ${tasksPath}`
}
};
}
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call the original command function
await validateDependenciesCommand(tasksPath);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
message: 'Dependencies validated successfully',
tasksPath
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error validating dependencies: ${error.message}`);
return {
success: false,
error: {
code: 'VALIDATION_ERROR',
message: error.message
}
};
}
}
try {
log.info(`Validating dependencies in tasks...`);
// Find the tasks.json path
const tasksPath = findTasksJsonPath(args, log);
// Verify the file exists
if (!fs.existsSync(tasksPath)) {
return {
success: false,
error: {
code: 'FILE_NOT_FOUND',
message: `Tasks file not found at ${tasksPath}`
}
};
}
// Enable silent mode to prevent console logs from interfering with JSON response
enableSilentMode();
// Call the original command function
await validateDependenciesCommand(tasksPath);
// Restore normal logging
disableSilentMode();
return {
success: true,
data: {
message: 'Dependencies validated successfully',
tasksPath
}
};
} catch (error) {
// Make sure to restore normal logging even if there's an error
disableSilentMode();
log.error(`Error validating dependencies: ${error.message}`);
return {
success: false,
error: {
code: 'VALIDATION_ERROR',
message: error.message
}
};
}
}

View File

@@ -33,64 +33,64 @@ import { removeTaskDirect } from './direct-functions/remove-task.js';
export { findTasksJsonPath } from './utils/path-utils.js';
// Re-export AI client utilities
export {
getAnthropicClientForMCP,
getPerplexityClientForMCP,
getModelConfig,
getBestAvailableAIModel,
handleClaudeError
export {
getAnthropicClientForMCP,
getPerplexityClientForMCP,
getModelConfig,
getBestAvailableAIModel,
handleClaudeError
} from './utils/ai-client-utils.js';
// Use Map for potential future enhancements like introspection or dynamic dispatch
export const directFunctions = new Map([
['listTasksDirect', listTasksDirect],
['getCacheStatsDirect', getCacheStatsDirect],
['parsePRDDirect', parsePRDDirect],
['updateTasksDirect', updateTasksDirect],
['updateTaskByIdDirect', updateTaskByIdDirect],
['updateSubtaskByIdDirect', updateSubtaskByIdDirect],
['generateTaskFilesDirect', generateTaskFilesDirect],
['setTaskStatusDirect', setTaskStatusDirect],
['showTaskDirect', showTaskDirect],
['nextTaskDirect', nextTaskDirect],
['expandTaskDirect', expandTaskDirect],
['addTaskDirect', addTaskDirect],
['addSubtaskDirect', addSubtaskDirect],
['removeSubtaskDirect', removeSubtaskDirect],
['analyzeTaskComplexityDirect', analyzeTaskComplexityDirect],
['clearSubtasksDirect', clearSubtasksDirect],
['expandAllTasksDirect', expandAllTasksDirect],
['removeDependencyDirect', removeDependencyDirect],
['validateDependenciesDirect', validateDependenciesDirect],
['fixDependenciesDirect', fixDependenciesDirect],
['complexityReportDirect', complexityReportDirect],
['addDependencyDirect', addDependencyDirect],
['removeTaskDirect', removeTaskDirect]
['listTasksDirect', listTasksDirect],
['getCacheStatsDirect', getCacheStatsDirect],
['parsePRDDirect', parsePRDDirect],
['updateTasksDirect', updateTasksDirect],
['updateTaskByIdDirect', updateTaskByIdDirect],
['updateSubtaskByIdDirect', updateSubtaskByIdDirect],
['generateTaskFilesDirect', generateTaskFilesDirect],
['setTaskStatusDirect', setTaskStatusDirect],
['showTaskDirect', showTaskDirect],
['nextTaskDirect', nextTaskDirect],
['expandTaskDirect', expandTaskDirect],
['addTaskDirect', addTaskDirect],
['addSubtaskDirect', addSubtaskDirect],
['removeSubtaskDirect', removeSubtaskDirect],
['analyzeTaskComplexityDirect', analyzeTaskComplexityDirect],
['clearSubtasksDirect', clearSubtasksDirect],
['expandAllTasksDirect', expandAllTasksDirect],
['removeDependencyDirect', removeDependencyDirect],
['validateDependenciesDirect', validateDependenciesDirect],
['fixDependenciesDirect', fixDependenciesDirect],
['complexityReportDirect', complexityReportDirect],
['addDependencyDirect', addDependencyDirect],
['removeTaskDirect', removeTaskDirect]
]);
// Re-export all direct function implementations
export {
listTasksDirect,
getCacheStatsDirect,
parsePRDDirect,
updateTasksDirect,
updateTaskByIdDirect,
updateSubtaskByIdDirect,
generateTaskFilesDirect,
setTaskStatusDirect,
showTaskDirect,
nextTaskDirect,
expandTaskDirect,
addTaskDirect,
addSubtaskDirect,
removeSubtaskDirect,
analyzeTaskComplexityDirect,
clearSubtasksDirect,
expandAllTasksDirect,
removeDependencyDirect,
validateDependenciesDirect,
fixDependenciesDirect,
complexityReportDirect,
addDependencyDirect,
removeTaskDirect
};
listTasksDirect,
getCacheStatsDirect,
parsePRDDirect,
updateTasksDirect,
updateTaskByIdDirect,
updateSubtaskByIdDirect,
generateTaskFilesDirect,
setTaskStatusDirect,
showTaskDirect,
nextTaskDirect,
expandTaskDirect,
addTaskDirect,
addSubtaskDirect,
removeSubtaskDirect,
analyzeTaskComplexityDirect,
clearSubtasksDirect,
expandAllTasksDirect,
removeDependencyDirect,
validateDependenciesDirect,
fixDependenciesDirect,
complexityReportDirect,
addDependencyDirect,
removeTaskDirect
};
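The comment above names dynamic dispatch as one reason for keeping the Map; a minimal sketch of what that could look like (the dispatcher itself is hypothetical and not part of this changeset):

// Hypothetical dispatcher built on the exported directFunctions Map.
async function dispatchDirect(name, args, log, context = {}) {
	const fn = directFunctions.get(name);
	if (!fn) {
		return {
			success: false,
			error: {
				code: 'UNKNOWN_DIRECT_FUNCTION',
				message: `No direct function named ${name}`
			},
			fromCache: false
		};
	}
	return fn(args, log, context);
}

// e.g. await dispatchDirect('showTaskDirect', { id: '5' }, log);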

View File

@@ -11,9 +11,9 @@ dotenv.config();
// Default model configuration from CLI environment
const DEFAULT_MODEL_CONFIG = {
model: 'claude-3-7-sonnet-20250219',
maxTokens: 64000,
temperature: 0.2
model: 'claude-3-7-sonnet-20250219',
maxTokens: 64000,
temperature: 0.2
};
/**
@@ -24,25 +24,28 @@ const DEFAULT_MODEL_CONFIG = {
* @throws {Error} If API key is missing
*/
export function getAnthropicClientForMCP(session, log = console) {
try {
// Extract API key from session.env or fall back to environment variables
const apiKey = session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY;
if (!apiKey) {
throw new Error('ANTHROPIC_API_KEY not found in session environment or process.env');
}
// Initialize and return a new Anthropic client
return new Anthropic({
apiKey,
defaultHeaders: {
'anthropic-beta': 'output-128k-2025-02-19' // Include header for increased token limit
}
});
} catch (error) {
log.error(`Failed to initialize Anthropic client: ${error.message}`);
throw error;
}
try {
// Extract API key from session.env or fall back to environment variables
const apiKey =
session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY;
if (!apiKey) {
throw new Error(
'ANTHROPIC_API_KEY not found in session environment or process.env'
);
}
// Initialize and return a new Anthropic client
return new Anthropic({
apiKey,
defaultHeaders: {
'anthropic-beta': 'output-128k-2025-02-19' // Include header for increased token limit
}
});
} catch (error) {
log.error(`Failed to initialize Anthropic client: ${error.message}`);
throw error;
}
}
/**
@@ -53,26 +56,29 @@ export function getAnthropicClientForMCP(session, log = console) {
* @throws {Error} If API key is missing or OpenAI package can't be imported
*/
export async function getPerplexityClientForMCP(session, log = console) {
try {
// Extract API key from session.env or fall back to environment variables
const apiKey = session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY;
if (!apiKey) {
throw new Error('PERPLEXITY_API_KEY not found in session environment or process.env');
}
// Dynamically import OpenAI (it may not be used in all contexts)
const { default: OpenAI } = await import('openai');
// Initialize and return a new OpenAI client configured for Perplexity
return new OpenAI({
apiKey,
baseURL: 'https://api.perplexity.ai'
});
} catch (error) {
log.error(`Failed to initialize Perplexity client: ${error.message}`);
throw error;
}
try {
// Extract API key from session.env or fall back to environment variables
const apiKey =
session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY;
if (!apiKey) {
throw new Error(
'PERPLEXITY_API_KEY not found in session environment or process.env'
);
}
// Dynamically import OpenAI (it may not be used in all contexts)
const { default: OpenAI } = await import('openai');
// Initialize and return a new OpenAI client configured for Perplexity
return new OpenAI({
apiKey,
baseURL: 'https://api.perplexity.ai'
});
} catch (error) {
log.error(`Failed to initialize Perplexity client: ${error.message}`);
throw error;
}
}
/**
@@ -82,12 +88,12 @@ export async function getPerplexityClientForMCP(session, log = console) {
* @returns {Object} Model configuration with model, maxTokens, and temperature
*/
export function getModelConfig(session, defaults = DEFAULT_MODEL_CONFIG) {
// Get values from session or fall back to defaults
return {
model: session?.env?.MODEL || defaults.model,
maxTokens: parseInt(session?.env?.MAX_TOKENS || defaults.maxTokens),
temperature: parseFloat(session?.env?.TEMPERATURE || defaults.temperature)
};
// Get values from session or fall back to defaults
return {
model: session?.env?.MODEL || defaults.model,
maxTokens: parseInt(session?.env?.MAX_TOKENS || defaults.maxTokens),
temperature: parseFloat(session?.env?.TEMPERATURE || defaults.temperature)
};
}
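For illustration, with a session whose env overrides only MODEL and MAX_TOKENS, getModelConfig resolves roughly as follows (the values below are placeholders):

// Illustrative only.
const session = {
	env: { MODEL: 'claude-3-7-sonnet-20250219', MAX_TOKENS: '128000' }
};
const config = getModelConfig(session);
// -> { model: 'claude-3-7-sonnet-20250219', maxTokens: 128000, temperature: 0.2 }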
/**
@@ -100,59 +106,78 @@ export function getModelConfig(session, defaults = DEFAULT_MODEL_CONFIG) {
* @returns {Promise<Object>} Selected model info with type and client
* @throws {Error} If no AI models are available
*/
export async function getBestAvailableAIModel(session, options = {}, log = console) {
const { requiresResearch = false, claudeOverloaded = false } = options;
// Test case: When research is needed but no Perplexity, use Claude
if (requiresResearch &&
!(session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY) &&
(session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)) {
try {
log.warn('Perplexity not available for research, using Claude');
const client = getAnthropicClientForMCP(session, log);
return { type: 'claude', client };
} catch (error) {
log.error(`Claude not available: ${error.message}`);
throw new Error('No AI models available for research');
}
}
// Regular path: Perplexity for research when available
if (requiresResearch && (session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY)) {
try {
const client = await getPerplexityClientForMCP(session, log);
return { type: 'perplexity', client };
} catch (error) {
log.warn(`Perplexity not available: ${error.message}`);
// Fall through to Claude as backup
}
}
// Test case: Claude for overloaded scenario
if (claudeOverloaded && (session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)) {
try {
log.warn('Claude is overloaded but no alternatives are available. Proceeding with Claude anyway.');
const client = getAnthropicClientForMCP(session, log);
return { type: 'claude', client };
} catch (error) {
log.error(`Claude not available despite being overloaded: ${error.message}`);
throw new Error('No AI models available');
}
}
// Default case: Use Claude when available and not overloaded
if (!claudeOverloaded && (session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)) {
try {
const client = getAnthropicClientForMCP(session, log);
return { type: 'claude', client };
} catch (error) {
log.warn(`Claude not available: ${error.message}`);
// Fall through to error if no other options
}
}
// If we got here, no models were successfully initialized
throw new Error('No AI models available. Please check your API keys.');
export async function getBestAvailableAIModel(
session,
options = {},
log = console
) {
const { requiresResearch = false, claudeOverloaded = false } = options;
// Test case: When research is needed but no Perplexity, use Claude
if (
requiresResearch &&
!(session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY) &&
(session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)
) {
try {
log.warn('Perplexity not available for research, using Claude');
const client = getAnthropicClientForMCP(session, log);
return { type: 'claude', client };
} catch (error) {
log.error(`Claude not available: ${error.message}`);
throw new Error('No AI models available for research');
}
}
// Regular path: Perplexity for research when available
if (
requiresResearch &&
(session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY)
) {
try {
const client = await getPerplexityClientForMCP(session, log);
return { type: 'perplexity', client };
} catch (error) {
log.warn(`Perplexity not available: ${error.message}`);
// Fall through to Claude as backup
}
}
// Test case: Claude for overloaded scenario
if (
claudeOverloaded &&
(session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)
) {
try {
log.warn(
'Claude is overloaded but no alternatives are available. Proceeding with Claude anyway.'
);
const client = getAnthropicClientForMCP(session, log);
return { type: 'claude', client };
} catch (error) {
log.error(
`Claude not available despite being overloaded: ${error.message}`
);
throw new Error('No AI models available');
}
}
// Default case: Use Claude when available and not overloaded
if (
!claudeOverloaded &&
(session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)
) {
try {
const client = getAnthropicClientForMCP(session, log);
return { type: 'claude', client };
} catch (error) {
log.warn(`Claude not available: ${error.message}`);
// Fall through to error if no other options
}
}
// If we got here, no models were successfully initialized
throw new Error('No AI models available. Please check your API keys.');
}
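A usage sketch of the selection logic above, assuming the session carries API keys in session.env (the key values are placeholders):

// Hypothetical caller.
const session = {
	env: {
		ANTHROPIC_API_KEY: 'sk-ant-placeholder',
		PERPLEXITY_API_KEY: 'pplx-placeholder'
	}
};

const { type, client } = await getBestAvailableAIModel(
	session,
	{ requiresResearch: true },
	console
);
// type is 'perplexity' when a Perplexity key is usable, otherwise 'claude'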
/**
@@ -161,28 +186,28 @@ export async function getBestAvailableAIModel(session, options = {}, log = console) {
* @returns {string} User-friendly error message
*/
export function handleClaudeError(error) {
// Check if it's a structured error response
if (error.type === 'error' && error.error) {
switch (error.error.type) {
case 'overloaded_error':
return 'Claude is currently experiencing high demand and is overloaded. Please wait a few minutes and try again.';
case 'rate_limit_error':
return 'You have exceeded the rate limit. Please wait a few minutes before making more requests.';
case 'invalid_request_error':
return 'There was an issue with the request format. If this persists, please report it as a bug.';
default:
return `Claude API error: ${error.error.message}`;
}
}
// Check for network/timeout errors
if (error.message?.toLowerCase().includes('timeout')) {
return 'The request to Claude timed out. Please try again.';
}
if (error.message?.toLowerCase().includes('network')) {
return 'There was a network error connecting to Claude. Please check your internet connection and try again.';
}
// Default error message
return `Error communicating with Claude: ${error.message}`;
}
// Check if it's a structured error response
if (error.type === 'error' && error.error) {
switch (error.error.type) {
case 'overloaded_error':
return 'Claude is currently experiencing high demand and is overloaded. Please wait a few minutes and try again.';
case 'rate_limit_error':
return 'You have exceeded the rate limit. Please wait a few minutes before making more requests.';
case 'invalid_request_error':
return 'There was an issue with the request format. If this persists, please report it as a bug.';
default:
return `Claude API error: ${error.error.message}`;
}
}
// Check for network/timeout errors
if (error.message?.toLowerCase().includes('timeout')) {
return 'The request to Claude timed out. Please try again.';
}
if (error.message?.toLowerCase().includes('network')) {
return 'There was a network error connecting to Claude. Please check your internet connection and try again.';
}
// Default error message
return `Error communicating with Claude: ${error.message}`;
}
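handleClaudeError is meant to sit around a Claude call; a minimal sketch, assuming the Anthropic SDK's messages.create API and the helpers defined above are in scope:

// Sketch only; session and log are assumed to come from the MCP context.
try {
	const client = getAnthropicClientForMCP(session, log);
	const { model, maxTokens, temperature } = getModelConfig(session);
	const response = await client.messages.create({
		model,
		max_tokens: maxTokens,
		temperature,
		messages: [{ role: 'user', content: 'Summarize task 5' }]
	});
	log.info(response.content[0].text);
} catch (error) {
	log.error(handleClaudeError(error)); // user-friendly message instead of a raw stack
}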

View File

@@ -1,213 +1,247 @@
import { v4 as uuidv4 } from 'uuid';
class AsyncOperationManager {
constructor() {
this.operations = new Map(); // Stores active operation state
this.completedOperations = new Map(); // Stores completed operations
this.maxCompletedOperations = 100; // Maximum number of completed operations to store
this.listeners = new Map(); // For potential future notifications
}
constructor() {
this.operations = new Map(); // Stores active operation state
this.completedOperations = new Map(); // Stores completed operations
this.maxCompletedOperations = 100; // Maximum number of completed operations to store
this.listeners = new Map(); // For potential future notifications
}
/**
* Adds an operation to be executed asynchronously.
* @param {Function} operationFn - The async function to execute (e.g., a Direct function).
* @param {Object} args - Arguments to pass to the operationFn.
* @param {Object} context - The MCP tool context { log, reportProgress, session }.
* @returns {string} The unique ID assigned to this operation.
*/
addOperation(operationFn, args, context) {
const operationId = `op-${uuidv4()}`;
const operation = {
id: operationId,
status: 'pending',
startTime: Date.now(),
endTime: null,
result: null,
error: null,
// Store necessary parts of context, especially log for background execution
log: context.log,
reportProgress: context.reportProgress, // Pass reportProgress through
session: context.session // Pass session through if needed by the operationFn
};
this.operations.set(operationId, operation);
this.log(operationId, 'info', `Operation added.`);
/**
* Adds an operation to be executed asynchronously.
* @param {Function} operationFn - The async function to execute (e.g., a Direct function).
* @param {Object} args - Arguments to pass to the operationFn.
* @param {Object} context - The MCP tool context { log, reportProgress, session }.
* @returns {string} The unique ID assigned to this operation.
*/
addOperation(operationFn, args, context) {
const operationId = `op-${uuidv4()}`;
const operation = {
id: operationId,
status: 'pending',
startTime: Date.now(),
endTime: null,
result: null,
error: null,
// Store necessary parts of context, especially log for background execution
log: context.log,
reportProgress: context.reportProgress, // Pass reportProgress through
session: context.session // Pass session through if needed by the operationFn
};
this.operations.set(operationId, operation);
this.log(operationId, 'info', `Operation added.`);
// Start execution in the background (don't await here)
this._runOperation(operationId, operationFn, args, context).catch(err => {
// Catch unexpected errors during the async execution setup itself
this.log(operationId, 'error', `Critical error starting operation: ${err.message}`, { stack: err.stack });
operation.status = 'failed';
operation.error = { code: 'MANAGER_EXECUTION_ERROR', message: err.message };
operation.endTime = Date.now();
// Move to completed operations
this._moveToCompleted(operationId);
});
// Start execution in the background (don't await here)
this._runOperation(operationId, operationFn, args, context).catch((err) => {
// Catch unexpected errors during the async execution setup itself
this.log(
operationId,
'error',
`Critical error starting operation: ${err.message}`,
{ stack: err.stack }
);
operation.status = 'failed';
operation.error = {
code: 'MANAGER_EXECUTION_ERROR',
message: err.message
};
operation.endTime = Date.now();
return operationId;
}
// Move to completed operations
this._moveToCompleted(operationId);
});
/**
* Internal function to execute the operation.
* @param {string} operationId - The ID of the operation.
* @param {Function} operationFn - The async function to execute.
* @param {Object} args - Arguments for the function.
* @param {Object} context - The original MCP tool context.
*/
async _runOperation(operationId, operationFn, args, context) {
const operation = this.operations.get(operationId);
if (!operation) return; // Should not happen
return operationId;
}
operation.status = 'running';
this.log(operationId, 'info', `Operation running.`);
this.emit('statusChanged', { operationId, status: 'running' });
/**
* Internal function to execute the operation.
* @param {string} operationId - The ID of the operation.
* @param {Function} operationFn - The async function to execute.
* @param {Object} args - Arguments for the function.
* @param {Object} context - The original MCP tool context.
*/
async _runOperation(operationId, operationFn, args, context) {
const operation = this.operations.get(operationId);
if (!operation) return; // Should not happen
try {
// Pass the necessary context parts to the direct function
// The direct function needs to be adapted if it needs reportProgress
// We pass the original context's log, plus our wrapped reportProgress
const result = await operationFn(args, operation.log, {
reportProgress: (progress) => this._handleProgress(operationId, progress),
mcpLog: operation.log, // Pass log as mcpLog if direct fn expects it
session: operation.session
});
operation.status = result.success ? 'completed' : 'failed';
operation.result = result.success ? result.data : null;
operation.error = result.success ? null : result.error;
this.log(operationId, 'info', `Operation finished with status: ${operation.status}`);
operation.status = 'running';
this.log(operationId, 'info', `Operation running.`);
this.emit('statusChanged', { operationId, status: 'running' });
} catch (error) {
this.log(operationId, 'error', `Operation failed with error: ${error.message}`, { stack: error.stack });
operation.status = 'failed';
operation.error = { code: 'OPERATION_EXECUTION_ERROR', message: error.message };
} finally {
operation.endTime = Date.now();
this.emit('statusChanged', { operationId, status: operation.status, result: operation.result, error: operation.error });
// Move to completed operations if done or failed
if (operation.status === 'completed' || operation.status === 'failed') {
this._moveToCompleted(operationId);
}
}
}
/**
* Move an operation from active operations to completed operations history.
* @param {string} operationId - The ID of the operation to move.
* @private
*/
_moveToCompleted(operationId) {
const operation = this.operations.get(operationId);
if (!operation) return;
// Store only the necessary data in completed operations
const completedData = {
id: operation.id,
status: operation.status,
startTime: operation.startTime,
endTime: operation.endTime,
result: operation.result,
error: operation.error,
};
this.completedOperations.set(operationId, completedData);
this.operations.delete(operationId);
// Trim completed operations if exceeding maximum
if (this.completedOperations.size > this.maxCompletedOperations) {
// Get the oldest operation (sorted by endTime)
const oldest = [...this.completedOperations.entries()]
.sort((a, b) => a[1].endTime - b[1].endTime)[0];
if (oldest) {
this.completedOperations.delete(oldest[0]);
}
}
}
/**
* Handles progress updates from the running operation and forwards them.
* @param {string} operationId - The ID of the operation reporting progress.
* @param {Object} progress - The progress object { progress, total? }.
*/
_handleProgress(operationId, progress) {
const operation = this.operations.get(operationId);
if (operation && operation.reportProgress) {
try {
// Use the reportProgress function captured from the original context
operation.reportProgress(progress);
this.log(operationId, 'debug', `Reported progress: ${JSON.stringify(progress)}`);
} catch(err) {
this.log(operationId, 'warn', `Failed to report progress: ${err.message}`);
// Don't stop the operation, just log the reporting failure
}
}
}
try {
// Pass the necessary context parts to the direct function
// The direct function needs to be adapted if it needs reportProgress
// We pass the original context's log, plus our wrapped reportProgress
const result = await operationFn(args, operation.log, {
reportProgress: (progress) =>
this._handleProgress(operationId, progress),
mcpLog: operation.log, // Pass log as mcpLog if direct fn expects it
session: operation.session
});
/**
* Retrieves the status and result/error of an operation.
* @param {string} operationId - The ID of the operation.
* @returns {Object | null} The operation details or null if not found.
*/
getStatus(operationId) {
// First check active operations
const operation = this.operations.get(operationId);
if (operation) {
return {
id: operation.id,
status: operation.status,
startTime: operation.startTime,
endTime: operation.endTime,
result: operation.result,
error: operation.error,
};
}
// Then check completed operations
const completedOperation = this.completedOperations.get(operationId);
if (completedOperation) {
return completedOperation;
}
// Operation not found in either active or completed
return {
error: {
code: 'OPERATION_NOT_FOUND',
message: `Operation ID ${operationId} not found. It may have been completed and removed from history, or the ID may be invalid.`
},
status: 'not_found'
};
}
/**
* Internal logging helper to prefix logs with the operation ID.
* @param {string} operationId - The ID of the operation.
* @param {'info'|'warn'|'error'|'debug'} level - Log level.
* @param {string} message - Log message.
* @param {Object} [meta] - Additional metadata.
*/
log(operationId, level, message, meta = {}) {
const operation = this.operations.get(operationId);
// Use the logger instance associated with the operation if available, otherwise console
const logger = operation?.log || console;
const logFn = logger[level] || logger.log || console.log; // Fallback
logFn(`[AsyncOp ${operationId}] ${message}`, meta);
}
operation.status = result.success ? 'completed' : 'failed';
operation.result = result.success ? result.data : null;
operation.error = result.success ? null : result.error;
this.log(
operationId,
'info',
`Operation finished with status: ${operation.status}`
);
} catch (error) {
this.log(
operationId,
'error',
`Operation failed with error: ${error.message}`,
{ stack: error.stack }
);
operation.status = 'failed';
operation.error = {
code: 'OPERATION_EXECUTION_ERROR',
message: error.message
};
} finally {
operation.endTime = Date.now();
this.emit('statusChanged', {
operationId,
status: operation.status,
result: operation.result,
error: operation.error
});
// --- Basic Event Emitter ---
on(eventName, listener) {
if (!this.listeners.has(eventName)) {
this.listeners.set(eventName, []);
}
this.listeners.get(eventName).push(listener);
}
// Move to completed operations if done or failed
if (operation.status === 'completed' || operation.status === 'failed') {
this._moveToCompleted(operationId);
}
}
}
emit(eventName, data) {
if (this.listeners.has(eventName)) {
this.listeners.get(eventName).forEach(listener => listener(data));
}
}
/**
* Move an operation from active operations to completed operations history.
* @param {string} operationId - The ID of the operation to move.
* @private
*/
_moveToCompleted(operationId) {
const operation = this.operations.get(operationId);
if (!operation) return;
// Store only the necessary data in completed operations
const completedData = {
id: operation.id,
status: operation.status,
startTime: operation.startTime,
endTime: operation.endTime,
result: operation.result,
error: operation.error
};
this.completedOperations.set(operationId, completedData);
this.operations.delete(operationId);
// Trim completed operations if exceeding maximum
if (this.completedOperations.size > this.maxCompletedOperations) {
// Get the oldest operation (sorted by endTime)
const oldest = [...this.completedOperations.entries()].sort(
(a, b) => a[1].endTime - b[1].endTime
)[0];
if (oldest) {
this.completedOperations.delete(oldest[0]);
}
}
}
/**
* Handles progress updates from the running operation and forwards them.
* @param {string} operationId - The ID of the operation reporting progress.
* @param {Object} progress - The progress object { progress, total? }.
*/
_handleProgress(operationId, progress) {
const operation = this.operations.get(operationId);
if (operation && operation.reportProgress) {
try {
// Use the reportProgress function captured from the original context
operation.reportProgress(progress);
this.log(
operationId,
'debug',
`Reported progress: ${JSON.stringify(progress)}`
);
} catch (err) {
this.log(
operationId,
'warn',
`Failed to report progress: ${err.message}`
);
// Don't stop the operation, just log the reporting failure
}
}
}
/**
* Retrieves the status and result/error of an operation.
* @param {string} operationId - The ID of the operation.
 * @returns {Object} The operation details, or an object with status 'not_found' and an error payload if the ID is unknown.
*/
getStatus(operationId) {
// First check active operations
const operation = this.operations.get(operationId);
if (operation) {
return {
id: operation.id,
status: operation.status,
startTime: operation.startTime,
endTime: operation.endTime,
result: operation.result,
error: operation.error
};
}
// Then check completed operations
const completedOperation = this.completedOperations.get(operationId);
if (completedOperation) {
return completedOperation;
}
// Operation not found in either active or completed
return {
error: {
code: 'OPERATION_NOT_FOUND',
message: `Operation ID ${operationId} not found. It may have been completed and removed from history, or the ID may be invalid.`
},
status: 'not_found'
};
}
/**
* Internal logging helper to prefix logs with the operation ID.
* @param {string} operationId - The ID of the operation.
* @param {'info'|'warn'|'error'|'debug'} level - Log level.
* @param {string} message - Log message.
* @param {Object} [meta] - Additional metadata.
*/
log(operationId, level, message, meta = {}) {
const operation = this.operations.get(operationId);
// Use the logger instance associated with the operation if available, otherwise console
const logger = operation?.log || console;
const logFn = logger[level] || logger.log || console.log; // Fallback
logFn(`[AsyncOp ${operationId}] ${message}`, meta);
}
// --- Basic Event Emitter ---
on(eventName, listener) {
if (!this.listeners.has(eventName)) {
this.listeners.set(eventName, []);
}
this.listeners.get(eventName).push(listener);
}
emit(eventName, data) {
if (this.listeners.has(eventName)) {
this.listeners.get(eventName).forEach((listener) => listener(data));
}
}
}
// Export a singleton instance
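A minimal usage sketch for the exported singleton. getStatus and the statusChanged event appear in the diff above; the method that enqueues an operation is not visible in this excerpt, so its name (addOperation) and exact signature are assumptions for illustration only.

import { asyncOperationManager } from './mcp-server/src/core/utils/async-manager.js'; // file path assumed

// React to status transitions emitted by the manager (event name shown in the diff)
asyncOperationManager.on('statusChanged', ({ operationId, status }) => {
	console.log(`Operation ${operationId} is now ${status}`);
});

// Hypothetical long-running direct function following the { success, data | error } convention
async function slowDirect(args, log, { reportProgress }) {
	reportProgress({ progress: 50 });
	return { success: true, data: { message: `done for task ${args.id}` } };
}

// Start method name and signature assumed; consult async-manager.js for the real API
const operationId = asyncOperationManager.addOperation(slowDirect, { id: '42' }, {
	log: console,
	session: {},
	reportProgress: (p) => console.log('progress', p)
});

// Poll the outcome later; status is 'running', 'completed', 'failed', or 'not_found'
console.log(asyncOperationManager.getStatus(operationId).status);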

View File

@@ -6,38 +6,42 @@
* @returns {Promise<any>} The result of the actionFn.
*/
export async function withSessionEnv(sessionEnv, actionFn) {
if (!sessionEnv || typeof sessionEnv !== 'object' || Object.keys(sessionEnv).length === 0) {
// If no sessionEnv is provided, just run the action directly
return await actionFn();
}
const originalEnv = {};
const keysToRestore = [];
// Set environment variables from sessionEnv
for (const key in sessionEnv) {
if (Object.prototype.hasOwnProperty.call(sessionEnv, key)) {
// Store original value if it exists, otherwise mark for deletion
if (process.env[key] !== undefined) {
originalEnv[key] = process.env[key];
}
keysToRestore.push(key);
process.env[key] = sessionEnv[key];
}
}
try {
// Execute the provided action function
return await actionFn();
} finally {
// Restore original environment variables
for (const key of keysToRestore) {
if (Object.prototype.hasOwnProperty.call(originalEnv, key)) {
process.env[key] = originalEnv[key];
} else {
// If the key didn't exist originally, delete it
delete process.env[key];
}
}
}
}
if (
!sessionEnv ||
typeof sessionEnv !== 'object' ||
Object.keys(sessionEnv).length === 0
) {
// If no sessionEnv is provided, just run the action directly
return await actionFn();
}
const originalEnv = {};
const keysToRestore = [];
// Set environment variables from sessionEnv
for (const key in sessionEnv) {
if (Object.prototype.hasOwnProperty.call(sessionEnv, key)) {
// Store original value if it exists, otherwise mark for deletion
if (process.env[key] !== undefined) {
originalEnv[key] = process.env[key];
}
keysToRestore.push(key);
process.env[key] = sessionEnv[key];
}
}
try {
// Execute the provided action function
return await actionFn();
} finally {
// Restore original environment variables
for (const key of keysToRestore) {
if (Object.prototype.hasOwnProperty.call(originalEnv, key)) {
process.env[key] = originalEnv[key];
} else {
// If the key didn't exist originally, delete it
delete process.env[key];
}
}
}
}
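A short usage sketch for withSessionEnv, assuming the import path below; the helper temporarily applies session-scoped environment variables and restores (or deletes) the originals once the action finishes.

import { withSessionEnv } from './mcp-server/src/core/utils/env-utils.js'; // file path assumed

const keyLength = await withSessionEnv(
	{ ANTHROPIC_API_KEY: 'session-scoped-key' },
	async () => {
		// Inside the action the session value is visible on process.env
		return process.env.ANTHROPIC_API_KEY.length;
	}
);
// After the call the original ANTHROPIC_API_KEY is restored, or removed if it never existed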

View File

@@ -1,9 +1,9 @@
/**
* path-utils.js
* Utility functions for file path operations in Task Master
*
*
* This module provides robust path resolution for both:
* 1. PACKAGE PATH: Where task-master code is installed
* 1. PACKAGE PATH: Where task-master code is installed
* (global node_modules OR local ./node_modules/task-master OR direct from repo)
* 2. PROJECT PATH: Where user's tasks.json resides (typically user's project root)
*/
@@ -18,43 +18,43 @@ export let lastFoundProjectRoot = null;
// Project marker files that indicate a potential project root
export const PROJECT_MARKERS = [
// Task Master specific
'tasks.json',
'tasks/tasks.json',
// Common version control
'.git',
'.svn',
// Common package files
'package.json',
'pyproject.toml',
'Gemfile',
'go.mod',
'Cargo.toml',
// Common IDE/editor folders
'.cursor',
'.vscode',
'.idea',
// Common dependency directories (check if directory)
'node_modules',
'venv',
'.venv',
// Common config files
'.env',
'.eslintrc',
'tsconfig.json',
'babel.config.js',
'jest.config.js',
'webpack.config.js',
// Common CI/CD files
'.github/workflows',
'.gitlab-ci.yml',
'.circleci/config.yml'
// Task Master specific
'tasks.json',
'tasks/tasks.json',
// Common version control
'.git',
'.svn',
// Common package files
'package.json',
'pyproject.toml',
'Gemfile',
'go.mod',
'Cargo.toml',
// Common IDE/editor folders
'.cursor',
'.vscode',
'.idea',
// Common dependency directories (check if directory)
'node_modules',
'venv',
'.venv',
// Common config files
'.env',
'.eslintrc',
'tsconfig.json',
'babel.config.js',
'jest.config.js',
'webpack.config.js',
// Common CI/CD files
'.github/workflows',
'.gitlab-ci.yml',
'.circleci/config.yml'
];
/**
@@ -63,15 +63,15 @@ export const PROJECT_MARKERS = [
* @returns {string} - Absolute path to the package installation directory
*/
export function getPackagePath() {
// When running from source, __dirname is the directory containing this file
// When running from npm, we need to find the package root
const thisFilePath = fileURLToPath(import.meta.url);
const thisFileDir = path.dirname(thisFilePath);
// Navigate from core/utils up to the package root
// In dev: /path/to/task-master/mcp-server/src/core/utils -> /path/to/task-master
// In npm: /path/to/node_modules/task-master/mcp-server/src/core/utils -> /path/to/node_modules/task-master
return path.resolve(thisFileDir, '../../../../');
// When running from source, __dirname is the directory containing this file
// When running from npm, we need to find the package root
const thisFilePath = fileURLToPath(import.meta.url);
const thisFileDir = path.dirname(thisFilePath);
// Navigate from core/utils up to the package root
// In dev: /path/to/task-master/mcp-server/src/core/utils -> /path/to/task-master
// In npm: /path/to/node_modules/task-master/mcp-server/src/core/utils -> /path/to/node_modules/task-master
return path.resolve(thisFileDir, '../../../../');
}
/**
@@ -82,62 +82,73 @@ export function getPackagePath() {
* @throws {Error} - If tasks.json cannot be found.
*/
export function findTasksJsonPath(args, log) {
// PRECEDENCE ORDER for finding tasks.json:
// 1. Explicitly provided `projectRoot` in args (Highest priority, expected in MCP context)
// 2. Previously found/cached `lastFoundProjectRoot` (primarily for CLI performance)
// 3. Search upwards from current working directory (`process.cwd()`) - CLI usage
// 1. If project root is explicitly provided (e.g., from MCP session), use it directly
if (args.projectRoot) {
const projectRoot = args.projectRoot;
log.info(`Using explicitly provided project root: ${projectRoot}`);
try {
// This will throw if tasks.json isn't found within this root
return findTasksJsonInDirectory(projectRoot, args.file, log);
} catch (error) {
// Include debug info in error
const debugInfo = {
projectRoot,
currentDir: process.cwd(),
serverDir: path.dirname(process.argv[1]),
possibleProjectRoot: path.resolve(path.dirname(process.argv[1]), '../..'),
lastFoundProjectRoot,
searchedPaths: error.message
};
error.message = `Tasks file not found in any of the expected locations relative to project root "${projectRoot}" (from session).\nDebug Info: ${JSON.stringify(debugInfo, null, 2)}`;
throw error;
}
}
// --- Fallback logic primarily for CLI or when projectRoot isn't passed ---
// PRECEDENCE ORDER for finding tasks.json:
// 1. Explicitly provided `projectRoot` in args (Highest priority, expected in MCP context)
// 2. Previously found/cached `lastFoundProjectRoot` (primarily for CLI performance)
// 3. Search upwards from current working directory (`process.cwd()`) - CLI usage
// 2. If we have a last known project root that worked, try it first
if (lastFoundProjectRoot) {
log.info(`Trying last known project root: ${lastFoundProjectRoot}`);
try {
// Use the cached root
const tasksPath = findTasksJsonInDirectory(lastFoundProjectRoot, args.file, log);
return tasksPath; // Return if found in cached root
} catch (error) {
log.info(`Task file not found in last known project root, continuing search.`);
// Continue with search if not found in cache
}
}
// 3. Start search from current directory (most common CLI scenario)
const startDir = process.cwd();
log.info(`Searching for tasks.json starting from current directory: ${startDir}`);
// Try to find tasks.json by walking up the directory tree from cwd
try {
// This will throw if not found in the CWD tree
return findTasksJsonWithParentSearch(startDir, args.file, log);
} catch (error) {
// If all attempts fail, augment and throw the original error from CWD search
error.message = `${error.message}\n\nPossible solutions:\n1. Run the command from your project directory containing tasks.json\n2. Use --project-root=/path/to/project to specify the project location (if using CLI)\n3. Ensure the project root is correctly passed from the client (if using MCP)\n\nCurrent working directory: ${startDir}\nLast known project root: ${lastFoundProjectRoot}\nProject root from args: ${args.projectRoot}`;
throw error;
}
// 1. If project root is explicitly provided (e.g., from MCP session), use it directly
if (args.projectRoot) {
const projectRoot = args.projectRoot;
log.info(`Using explicitly provided project root: ${projectRoot}`);
try {
// This will throw if tasks.json isn't found within this root
return findTasksJsonInDirectory(projectRoot, args.file, log);
} catch (error) {
// Include debug info in error
const debugInfo = {
projectRoot,
currentDir: process.cwd(),
serverDir: path.dirname(process.argv[1]),
possibleProjectRoot: path.resolve(
path.dirname(process.argv[1]),
'../..'
),
lastFoundProjectRoot,
searchedPaths: error.message
};
error.message = `Tasks file not found in any of the expected locations relative to project root "${projectRoot}" (from session).\nDebug Info: ${JSON.stringify(debugInfo, null, 2)}`;
throw error;
}
}
// --- Fallback logic primarily for CLI or when projectRoot isn't passed ---
// 2. If we have a last known project root that worked, try it first
if (lastFoundProjectRoot) {
log.info(`Trying last known project root: ${lastFoundProjectRoot}`);
try {
// Use the cached root
const tasksPath = findTasksJsonInDirectory(
lastFoundProjectRoot,
args.file,
log
);
return tasksPath; // Return if found in cached root
} catch (error) {
log.info(
`Task file not found in last known project root, continuing search.`
);
// Continue with search if not found in cache
}
}
// 3. Start search from current directory (most common CLI scenario)
const startDir = process.cwd();
log.info(
`Searching for tasks.json starting from current directory: ${startDir}`
);
// Try to find tasks.json by walking up the directory tree from cwd
try {
// This will throw if not found in the CWD tree
return findTasksJsonWithParentSearch(startDir, args.file, log);
} catch (error) {
// If all attempts fail, augment and throw the original error from CWD search
error.message = `${error.message}\n\nPossible solutions:\n1. Run the command from your project directory containing tasks.json\n2. Use --project-root=/path/to/project to specify the project location (if using CLI)\n3. Ensure the project root is correctly passed from the client (if using MCP)\n\nCurrent working directory: ${startDir}\nLast known project root: ${lastFoundProjectRoot}\nProject root from args: ${args.projectRoot}`;
throw error;
}
}
/**
@@ -146,11 +157,11 @@ export function findTasksJsonPath(args, log) {
* @returns {boolean} - True if the directory contains any project markers
*/
function hasProjectMarkers(dirPath) {
return PROJECT_MARKERS.some(marker => {
const markerPath = path.join(dirPath, marker);
// Check if the marker exists as either a file or directory
return fs.existsSync(markerPath);
});
return PROJECT_MARKERS.some((marker) => {
const markerPath = path.join(dirPath, marker);
// Check if the marker exists as either a file or directory
return fs.existsSync(markerPath);
});
}
/**
@@ -162,39 +173,41 @@ function hasProjectMarkers(dirPath) {
* @throws {Error} - If tasks.json cannot be found
*/
function findTasksJsonInDirectory(dirPath, explicitFilePath, log) {
const possiblePaths = [];
const possiblePaths = [];
// 1. If a file is explicitly provided relative to dirPath
if (explicitFilePath) {
possiblePaths.push(path.resolve(dirPath, explicitFilePath));
}
// 1. If a file is explicitly provided relative to dirPath
if (explicitFilePath) {
possiblePaths.push(path.resolve(dirPath, explicitFilePath));
}
// 2. Check the standard locations relative to dirPath
possiblePaths.push(
path.join(dirPath, 'tasks.json'),
path.join(dirPath, 'tasks', 'tasks.json')
);
// 2. Check the standard locations relative to dirPath
possiblePaths.push(
path.join(dirPath, 'tasks.json'),
path.join(dirPath, 'tasks', 'tasks.json')
);
log.info(`Checking potential task file paths: ${possiblePaths.join(', ')}`);
log.info(`Checking potential task file paths: ${possiblePaths.join(', ')}`);
// Find the first existing path
for (const p of possiblePaths) {
log.info(`Checking if exists: ${p}`);
const exists = fs.existsSync(p);
log.info(`Path ${p} exists: ${exists}`);
if (exists) {
log.info(`Found tasks file at: ${p}`);
// Store the project root for future use
lastFoundProjectRoot = dirPath;
return p;
}
}
// Find the first existing path
for (const p of possiblePaths) {
log.info(`Checking if exists: ${p}`);
const exists = fs.existsSync(p);
log.info(`Path ${p} exists: ${exists}`);
// If no file was found, throw an error
const error = new Error(`Tasks file not found in any of the expected locations relative to ${dirPath}: ${possiblePaths.join(', ')}`);
error.code = 'TASKS_FILE_NOT_FOUND';
throw error;
if (exists) {
log.info(`Found tasks file at: ${p}`);
// Store the project root for future use
lastFoundProjectRoot = dirPath;
return p;
}
}
// If no file was found, throw an error
const error = new Error(
`Tasks file not found in any of the expected locations relative to ${dirPath}: ${possiblePaths.join(', ')}`
);
error.code = 'TASKS_FILE_NOT_FOUND';
throw error;
}
/**
@@ -207,66 +220,74 @@ function findTasksJsonInDirectory(dirPath, explicitFilePath, log) {
* @throws {Error} - If tasks.json cannot be found in any parent directory
*/
function findTasksJsonWithParentSearch(startDir, explicitFilePath, log) {
let currentDir = startDir;
const rootDir = path.parse(currentDir).root;
// Keep traversing up until we hit the root directory
while (currentDir !== rootDir) {
// First check for tasks.json directly
try {
return findTasksJsonInDirectory(currentDir, explicitFilePath, log);
} catch (error) {
// If tasks.json not found but the directory has project markers,
// log it as a potential project root (helpful for debugging)
if (hasProjectMarkers(currentDir)) {
log.info(`Found project markers in ${currentDir}, but no tasks.json`);
}
// Move up to parent directory
const parentDir = path.dirname(currentDir);
// Check if we've reached the root
if (parentDir === currentDir) {
break;
}
log.info(`Tasks file not found in ${currentDir}, searching in parent directory: ${parentDir}`);
currentDir = parentDir;
}
}
// If we've searched all the way to the root and found nothing
const error = new Error(`Tasks file not found in ${startDir} or any parent directory.`);
error.code = 'TASKS_FILE_NOT_FOUND';
throw error;
let currentDir = startDir;
const rootDir = path.parse(currentDir).root;
// Keep traversing up until we hit the root directory
while (currentDir !== rootDir) {
// First check for tasks.json directly
try {
return findTasksJsonInDirectory(currentDir, explicitFilePath, log);
} catch (error) {
// If tasks.json not found but the directory has project markers,
// log it as a potential project root (helpful for debugging)
if (hasProjectMarkers(currentDir)) {
log.info(`Found project markers in ${currentDir}, but no tasks.json`);
}
// Move up to parent directory
const parentDir = path.dirname(currentDir);
// Check if we've reached the root
if (parentDir === currentDir) {
break;
}
log.info(
`Tasks file not found in ${currentDir}, searching in parent directory: ${parentDir}`
);
currentDir = parentDir;
}
}
// If we've searched all the way to the root and found nothing
const error = new Error(
`Tasks file not found in ${startDir} or any parent directory.`
);
error.code = 'TASKS_FILE_NOT_FOUND';
throw error;
}
// Note: findTasksWithNpmConsideration is not used by findTasksJsonPath and might be legacy or used elsewhere.
// If confirmed unused, it could potentially be removed in a separate cleanup.
function findTasksWithNpmConsideration(startDir, log) {
// First try our recursive parent search from cwd
try {
return findTasksJsonWithParentSearch(startDir, null, log);
} catch (error) {
// If that fails, try looking relative to the executable location
const execPath = process.argv[1];
const execDir = path.dirname(execPath);
log.info(`Looking for tasks file relative to executable at: ${execDir}`);
try {
return findTasksJsonWithParentSearch(execDir, null, log);
} catch (secondError) {
// If that also fails, check standard locations in user's home directory
const homeDir = os.homedir();
log.info(`Looking for tasks file in home directory: ${homeDir}`);
try {
// Check standard locations in home dir
return findTasksJsonInDirectory(path.join(homeDir, '.task-master'), null, log);
} catch (thirdError) {
// If all approaches fail, throw the original error
throw error;
}
}
}
}
// First try our recursive parent search from cwd
try {
return findTasksJsonWithParentSearch(startDir, null, log);
} catch (error) {
// If that fails, try looking relative to the executable location
const execPath = process.argv[1];
const execDir = path.dirname(execPath);
log.info(`Looking for tasks file relative to executable at: ${execDir}`);
try {
return findTasksJsonWithParentSearch(execDir, null, log);
} catch (secondError) {
// If that also fails, check standard locations in user's home directory
const homeDir = os.homedir();
log.info(`Looking for tasks file in home directory: ${homeDir}`);
try {
// Check standard locations in home dir
return findTasksJsonInDirectory(
path.join(homeDir, '.task-master'),
null,
log
);
} catch (thirdError) {
// If all approaches fail, throw the original error
throw error;
}
}
}
}
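A minimal sketch of how a caller resolves the tasks file using the precedence described above (explicit projectRoot, then the cached root, then an upward search from the current directory); the import path is assumed.

import { findTasksJsonPath } from './mcp-server/src/core/utils/path-utils.js'; // file path assumed

const log = { info: console.log, warn: console.warn, error: console.error };

try {
	const tasksPath = findTasksJsonPath(
		{ projectRoot: '/path/to/project', file: 'tasks/tasks.json' },
		log
	);
	log.info(`Resolved tasks file: ${tasksPath}`);
} catch (error) {
	// Both lookup helpers tag failures with this code
	if (error.code === 'TASKS_FILE_NOT_FOUND') {
		log.error(error.message);
	}
}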

View File

@@ -1,10 +1,10 @@
import { FastMCP } from "fastmcp";
import path from "path";
import dotenv from "dotenv";
import { fileURLToPath } from "url";
import fs from "fs";
import logger from "./logger.js";
import { registerTaskMasterTools } from "./tools/index.js";
import { FastMCP } from 'fastmcp';
import path from 'path';
import dotenv from 'dotenv';
import { fileURLToPath } from 'url';
import fs from 'fs';
import logger from './logger.js';
import { registerTaskMasterTools } from './tools/index.js';
import { asyncOperationManager } from './core/utils/async-manager.js';
// Load environment variables
@@ -18,74 +18,74 @@ const __dirname = path.dirname(__filename);
* Main MCP server class that integrates with Task Master
*/
class TaskMasterMCPServer {
constructor() {
// Get version from package.json using synchronous fs
const packagePath = path.join(__dirname, "../../package.json");
const packageJson = JSON.parse(fs.readFileSync(packagePath, "utf8"));
constructor() {
// Get version from package.json using synchronous fs
const packagePath = path.join(__dirname, '../../package.json');
const packageJson = JSON.parse(fs.readFileSync(packagePath, 'utf8'));
this.options = {
name: "Task Master MCP Server",
version: packageJson.version,
};
this.options = {
name: 'Task Master MCP Server',
version: packageJson.version
};
this.server = new FastMCP(this.options);
this.initialized = false;
this.server = new FastMCP(this.options);
this.initialized = false;
this.server.addResource({});
this.server.addResource({});
this.server.addResourceTemplate({});
this.server.addResourceTemplate({});
// Make the manager accessible (e.g., pass it to tool registration)
this.asyncManager = asyncOperationManager;
// Make the manager accessible (e.g., pass it to tool registration)
this.asyncManager = asyncOperationManager;
// Bind methods
this.init = this.init.bind(this);
this.start = this.start.bind(this);
this.stop = this.stop.bind(this);
// Bind methods
this.init = this.init.bind(this);
this.start = this.start.bind(this);
this.stop = this.stop.bind(this);
// Setup logging
this.logger = logger;
}
// Setup logging
this.logger = logger;
}
/**
* Initialize the MCP server with necessary tools and routes
*/
async init() {
if (this.initialized) return;
/**
* Initialize the MCP server with necessary tools and routes
*/
async init() {
if (this.initialized) return;
// Pass the manager instance to the tool registration function
registerTaskMasterTools(this.server, this.asyncManager);
// Pass the manager instance to the tool registration function
registerTaskMasterTools(this.server, this.asyncManager);
this.initialized = true;
this.initialized = true;
return this;
}
return this;
}
/**
* Start the MCP server
*/
async start() {
if (!this.initialized) {
await this.init();
}
/**
* Start the MCP server
*/
async start() {
if (!this.initialized) {
await this.init();
}
// Start the FastMCP server with increased timeout
await this.server.start({
transportType: "stdio",
timeout: 120000 // 2 minutes timeout (in milliseconds)
});
// Start the FastMCP server with increased timeout
await this.server.start({
transportType: 'stdio',
timeout: 120000 // 2 minutes timeout (in milliseconds)
});
return this;
}
return this;
}
/**
* Stop the MCP server
*/
async stop() {
if (this.server) {
await this.server.stop();
}
}
/**
* Stop the MCP server
*/
async stop() {
if (this.server) {
await this.server.stop();
}
}
}
// Export the manager from here as well, if needed elsewhere
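A minimal startup sketch for the class above, assuming it is the module's default export (the export statement falls outside this diff) and is launched from a small entry script.

import TaskMasterMCPServer from './mcp-server/src/index.js'; // default export assumed

const server = new TaskMasterMCPServer();

// init() registers the tools and the async operation manager; start() opens the stdio transport
await server.start();

// Shut down cleanly when the process is interrupted
process.on('SIGINT', async () => {
	await server.stop();
	process.exit(0);
});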

View File

@@ -1,19 +1,19 @@
import chalk from "chalk";
import { isSilentMode } from "../../scripts/modules/utils.js";
import chalk from 'chalk';
import { isSilentMode } from '../../scripts/modules/utils.js';
// Define log levels
const LOG_LEVELS = {
debug: 0,
info: 1,
warn: 2,
error: 3,
success: 4,
debug: 0,
info: 1,
warn: 2,
error: 3,
success: 4
};
// Get log level from environment or default to info
const LOG_LEVEL = process.env.LOG_LEVEL
? LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()] ?? LOG_LEVELS.info
: LOG_LEVELS.info;
? (LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()] ?? LOG_LEVELS.info)
: LOG_LEVELS.info;
/**
* Logs a message with the specified level
@@ -21,56 +21,66 @@ const LOG_LEVEL = process.env.LOG_LEVEL
* @param {...any} args - Arguments to log
*/
function log(level, ...args) {
// Skip logging if silent mode is enabled
if (isSilentMode()) {
return;
}
// Skip logging if silent mode is enabled
if (isSilentMode()) {
return;
}
// Use text prefixes instead of emojis
const prefixes = {
debug: chalk.gray("[DEBUG]"),
info: chalk.blue("[INFO]"),
warn: chalk.yellow("[WARN]"),
error: chalk.red("[ERROR]"),
success: chalk.green("[SUCCESS]"),
};
// Use text prefixes instead of emojis
const prefixes = {
debug: chalk.gray('[DEBUG]'),
info: chalk.blue('[INFO]'),
warn: chalk.yellow('[WARN]'),
error: chalk.red('[ERROR]'),
success: chalk.green('[SUCCESS]')
};
if (LOG_LEVELS[level] !== undefined && LOG_LEVELS[level] >= LOG_LEVEL) {
const prefix = prefixes[level] || "";
let coloredArgs = args;
if (LOG_LEVELS[level] !== undefined && LOG_LEVELS[level] >= LOG_LEVEL) {
const prefix = prefixes[level] || '';
let coloredArgs = args;
try {
switch(level) {
case "error":
coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.red(arg) : arg);
break;
case "warn":
coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.yellow(arg) : arg);
break;
case "success":
coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.green(arg) : arg);
break;
case "info":
coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.blue(arg) : arg);
break;
case "debug":
coloredArgs = args.map(arg => typeof arg === 'string' ? chalk.gray(arg) : arg);
break;
// default: use original args (no color)
}
} catch (colorError) {
// Fallback if chalk fails on an argument
// Use console.error here for internal logger errors, separate from normal logging
console.error("Internal Logger Error applying chalk color:", colorError);
coloredArgs = args;
}
try {
switch (level) {
case 'error':
coloredArgs = args.map((arg) =>
typeof arg === 'string' ? chalk.red(arg) : arg
);
break;
case 'warn':
coloredArgs = args.map((arg) =>
typeof arg === 'string' ? chalk.yellow(arg) : arg
);
break;
case 'success':
coloredArgs = args.map((arg) =>
typeof arg === 'string' ? chalk.green(arg) : arg
);
break;
case 'info':
coloredArgs = args.map((arg) =>
typeof arg === 'string' ? chalk.blue(arg) : arg
);
break;
case 'debug':
coloredArgs = args.map((arg) =>
typeof arg === 'string' ? chalk.gray(arg) : arg
);
break;
// default: use original args (no color)
}
} catch (colorError) {
// Fallback if chalk fails on an argument
// Use console.error here for internal logger errors, separate from normal logging
console.error('Internal Logger Error applying chalk color:', colorError);
coloredArgs = args;
}
// Revert to console.log - FastMCP's context logger (context.log)
// is responsible for directing logs correctly (e.g., to stderr)
// during tool execution without upsetting the client connection.
// Logs outside of tool execution (like startup) will go to stdout.
console.log(prefix, ...coloredArgs);
}
// Revert to console.log - FastMCP's context logger (context.log)
// is responsible for directing logs correctly (e.g., to stderr)
// during tool execution without upsetting the client connection.
// Logs outside of tool execution (like startup) will go to stdout.
console.log(prefix, ...coloredArgs);
}
}
/**
@@ -78,16 +88,19 @@ function log(level, ...args) {
* @returns {Object} Logger object with info, error, debug, warn, and success methods
*/
export function createLogger() {
const createLogMethod = (level) => (...args) => log(level, ...args);
const createLogMethod =
(level) =>
(...args) =>
log(level, ...args);
return {
debug: createLogMethod("debug"),
info: createLogMethod("info"),
warn: createLogMethod("warn"),
error: createLogMethod("error"),
success: createLogMethod("success"),
log: log, // Also expose the raw log function
};
return {
debug: createLogMethod('debug'),
info: createLogMethod('info'),
warn: createLogMethod('warn'),
error: createLogMethod('error'),
success: createLogMethod('success'),
log: log // Also expose the raw log function
};
}
// Export a default logger instance
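A short usage sketch for the factory above; messages below the configured LOG_LEVEL are dropped, and nothing is printed while silent mode is active.

import { createLogger } from './mcp-server/src/logger.js'; // file path assumed

const log = createLogger();

log.info('Server starting'); // printed at the default 'info' level
log.debug('Verbose detail'); // suppressed unless LOG_LEVEL=debug
log.error('Something went wrong'); // printed for any LOG_LEVEL at or below 'error'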

View File

@@ -3,63 +3,79 @@
* Tool for adding a dependency to a task
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { addDependencyDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { addDependencyDirect } from '../core/task-master-core.js';
/**
* Register the addDependency tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerAddDependencyTool(server) {
server.addTool({
name: "add_dependency",
description: "Add a dependency relationship between two tasks",
parameters: z.object({
id: z.string().describe("ID of task that will depend on another task"),
dependsOn: z.string().describe("ID of task that will become a dependency"),
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Adding dependency for task ${args.id} to depend on ${args.dependsOn}`);
reportProgress({ progress: 0 });
// Get project root using the utility function
let rootFolder = getProjectRootFromSession(session, log);
// Fallback to args.projectRoot if session didn't provide one
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
// Call the direct function with the resolved rootFolder
const result = await addDependencyDirect({
projectRoot: rootFolder,
...args
}, log, { reportProgress, mcpLog: log, session});
server.addTool({
name: 'add_dependency',
description: 'Add a dependency relationship between two tasks',
parameters: z.object({
id: z.string().describe('ID of task that will depend on another task'),
dependsOn: z
.string()
.describe('ID of task that will become a dependency'),
file: z
.string()
.optional()
.describe('Path to the tasks file (default: tasks/tasks.json)'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(
`Adding dependency for task ${args.id} to depend on ${args.dependsOn}`
);
reportProgress({ progress: 0 });
reportProgress({ progress: 100 });
// Log result
if (result.success) {
log.info(`Successfully added dependency: ${result.data.message}`);
} else {
log.error(`Failed to add dependency: ${result.error.message}`);
}
// Use handleApiResult to format the response
return handleApiResult(result, log, 'Error adding dependency');
} catch (error) {
log.error(`Error in addDependency tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
// Get project root using the utility function
let rootFolder = getProjectRootFromSession(session, log);
// Fallback to args.projectRoot if session didn't provide one
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
// Call the direct function with the resolved rootFolder
const result = await addDependencyDirect(
{
projectRoot: rootFolder,
...args
},
log,
{ reportProgress, mcpLog: log, session }
);
reportProgress({ progress: 100 });
// Log result
if (result.success) {
log.info(`Successfully added dependency: ${result.data.message}`);
} else {
log.error(`Failed to add dependency: ${result.error.message}`);
}
// Use handleApiResult to format the response
return handleApiResult(result, log, 'Error adding dependency');
} catch (error) {
log.error(`Error in addDependency tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}
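The remaining tools in this change follow the same registration shape, so one wiring sketch covers them. The file names and the exact contents of the tools index are assumptions, but registerTaskMasterTools(server, asyncManager) matches the call made from the server class above.

import { registerAddDependencyTool } from './add-dependency.js'; // file names assumed
import { registerAddSubtaskTool } from './add-subtask.js';
import { registerAddTaskTool } from './add-task.js';

export function registerTaskMasterTools(server, asyncManager) {
	registerAddDependencyTool(server);
	registerAddSubtaskTool(server);
	registerAddTaskTool(server);
	// ...the remaining tools are registered the same way
}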

View File

@@ -3,61 +3,94 @@
* Tool for adding subtasks to existing tasks
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { addSubtaskDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { addSubtaskDirect } from '../core/task-master-core.js';
/**
* Register the addSubtask tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerAddSubtaskTool(server) {
server.addTool({
name: "add_subtask",
description: "Add a subtask to an existing task",
parameters: z.object({
id: z.string().describe("Parent task ID (required)"),
taskId: z.string().optional().describe("Existing task ID to convert to subtask"),
title: z.string().optional().describe("Title for the new subtask (when creating a new subtask)"),
description: z.string().optional().describe("Description for the new subtask"),
details: z.string().optional().describe("Implementation details for the new subtask"),
status: z.string().optional().describe("Status for the new subtask (default: 'pending')"),
dependencies: z.string().optional().describe("Comma-separated list of dependency IDs for the new subtask"),
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
skipGenerate: z.boolean().optional().describe("Skip regenerating task files"),
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Adding subtask with args: ${JSON.stringify(args)}`);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await addSubtaskDirect({
projectRoot: rootFolder,
...args
}, log, { reportProgress, mcpLog: log, session});
if (result.success) {
log.info(`Subtask added successfully: ${result.data.message}`);
} else {
log.error(`Failed to add subtask: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error adding subtask');
} catch (error) {
log.error(`Error in addSubtask tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
server.addTool({
name: 'add_subtask',
description: 'Add a subtask to an existing task',
parameters: z.object({
id: z.string().describe('Parent task ID (required)'),
taskId: z
.string()
.optional()
.describe('Existing task ID to convert to subtask'),
title: z
.string()
.optional()
.describe('Title for the new subtask (when creating a new subtask)'),
description: z
.string()
.optional()
.describe('Description for the new subtask'),
details: z
.string()
.optional()
.describe('Implementation details for the new subtask'),
status: z
.string()
.optional()
.describe("Status for the new subtask (default: 'pending')"),
dependencies: z
.string()
.optional()
.describe('Comma-separated list of dependency IDs for the new subtask'),
file: z
.string()
.optional()
.describe('Path to the tasks file (default: tasks/tasks.json)'),
skipGenerate: z
.boolean()
.optional()
.describe('Skip regenerating task files'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Adding subtask with args: ${JSON.stringify(args)}`);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await addSubtaskDirect(
{
projectRoot: rootFolder,
...args
},
log,
{ reportProgress, mcpLog: log, session }
);
if (result.success) {
log.info(`Subtask added successfully: ${result.data.message}`);
} else {
log.error(`Failed to add subtask: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error adding subtask');
} catch (error) {
log.error(`Error in addSubtask tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File

@@ -3,56 +3,72 @@
* Tool to add a new task using AI
*/
import { z } from "zod";
import { z } from 'zod';
import {
createErrorResponse,
createContentResponse,
getProjectRootFromSession,
executeTaskMasterCommand,
handleApiResult
} from "./utils.js";
import { addTaskDirect } from "../core/task-master-core.js";
createErrorResponse,
createContentResponse,
getProjectRootFromSession,
executeTaskMasterCommand,
handleApiResult
} from './utils.js';
import { addTaskDirect } from '../core/task-master-core.js';
/**
* Register the addTask tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerAddTaskTool(server) {
server.addTool({
name: "add_task",
description: "Add a new task using AI",
parameters: z.object({
prompt: z.string().describe("Description of the task to add"),
dependencies: z.string().optional().describe("Comma-separated list of task IDs this task depends on"),
priority: z.string().optional().describe("Task priority (high, medium, low)"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z.string().optional().describe("Root directory of the project"),
research: z.boolean().optional().describe("Whether to use research capabilities for task creation")
}),
execute: async (args, { log, reportProgress, session }) => {
try {
log.info(`Starting add-task with args: ${JSON.stringify(args)}`);
// Get project root from session
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
// Call the direct function
const result = await addTaskDirect({
...args,
projectRoot: rootFolder
}, log, { reportProgress, session });
// Return the result
return handleApiResult(result, log);
} catch (error) {
log.error(`Error in add-task tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}
server.addTool({
name: 'add_task',
description: 'Add a new task using AI',
parameters: z.object({
prompt: z.string().describe('Description of the task to add'),
dependencies: z
.string()
.optional()
.describe('Comma-separated list of task IDs this task depends on'),
priority: z
.string()
.optional()
.describe('Task priority (high, medium, low)'),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.optional()
.describe('Root directory of the project'),
research: z
.boolean()
.optional()
.describe('Whether to use research capabilities for task creation')
}),
execute: async (args, { log, reportProgress, session }) => {
try {
log.info(`Starting add-task with args: ${JSON.stringify(args)}`);
// Get project root from session
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
// Call the direct function
const result = await addTaskDirect(
{
...args,
projectRoot: rootFolder
},
log,
{ reportProgress, session }
);
// Return the result
return handleApiResult(result, log);
} catch (error) {
log.error(`Error in add-task tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}
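A sketch of the result envelope these execute handlers hand to handleApiResult; the shape is inferred from how result.success, result.data, and result.error are read throughout these tools, not from the helper's own source.

// Success case resolved by a *Direct function
const ok = { success: true, data: { message: 'Task 12 added' } };

// Failure case resolved by a *Direct function
const fail = {
	success: false,
	error: { code: 'SOME_ERROR_CODE', message: 'What went wrong' }
};

// handleApiResult(result, log, 'Error adding task') converts either shape into
// an MCP response, logging the error message when success is false (inferred).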

View File

@@ -3,58 +3,95 @@
* Tool for analyzing task complexity and generating recommendations
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { analyzeTaskComplexityDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { analyzeTaskComplexityDirect } from '../core/task-master-core.js';
/**
* Register the analyze tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerAnalyzeTool(server) {
server.addTool({
name: "analyze_project_complexity",
description: "Analyze task complexity and generate expansion recommendations",
parameters: z.object({
output: z.string().optional().describe("Output file path for the report (default: scripts/task-complexity-report.json)"),
model: z.string().optional().describe("LLM model to use for analysis (defaults to configured model)"),
threshold: z.union([z.number(), z.string()]).optional().describe("Minimum complexity score to recommend expansion (1-10) (default: 5)"),
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
research: z.boolean().optional().describe("Use Perplexity AI for research-backed complexity analysis"),
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
}),
execute: async (args, { log, session }) => {
try {
log.info(`Analyzing task complexity with args: ${JSON.stringify(args)}`);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await analyzeTaskComplexityDirect({
projectRoot: rootFolder,
...args
}, log, { session });
if (result.success) {
log.info(`Task complexity analysis complete: ${result.data.message}`);
log.info(`Report summary: ${JSON.stringify(result.data.reportSummary)}`);
} else {
log.error(`Failed to analyze task complexity: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error analyzing task complexity');
} catch (error) {
log.error(`Error in analyze tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
server.addTool({
name: 'analyze_project_complexity',
description:
'Analyze task complexity and generate expansion recommendations',
parameters: z.object({
output: z
.string()
.optional()
.describe(
'Output file path for the report (default: scripts/task-complexity-report.json)'
),
model: z
.string()
.optional()
.describe(
'LLM model to use for analysis (defaults to configured model)'
),
threshold: z
.union([z.number(), z.string()])
.optional()
.describe(
'Minimum complexity score to recommend expansion (1-10) (default: 5)'
),
file: z
.string()
.optional()
.describe('Path to the tasks file (default: tasks/tasks.json)'),
research: z
.boolean()
.optional()
.describe('Use Perplexity AI for research-backed complexity analysis'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session }) => {
try {
log.info(
`Analyzing task complexity with args: ${JSON.stringify(args)}`
);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await analyzeTaskComplexityDirect(
{
projectRoot: rootFolder,
...args
},
log,
{ session }
);
if (result.success) {
log.info(`Task complexity analysis complete: ${result.data.message}`);
log.info(
`Report summary: ${JSON.stringify(result.data.reportSummary)}`
);
} else {
log.error(
`Failed to analyze task complexity: ${result.error.message}`
);
}
return handleApiResult(result, log, 'Error analyzing task complexity');
} catch (error) {
log.error(`Error in analyze tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File

@@ -3,61 +3,78 @@
* Tool for clearing subtasks from parent tasks
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { clearSubtasksDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { clearSubtasksDirect } from '../core/task-master-core.js';
/**
* Register the clearSubtasks tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerClearSubtasksTool(server) {
server.addTool({
name: "clear_subtasks",
description: "Clear subtasks from specified tasks",
parameters: z.object({
id: z.string().optional().describe("Task IDs (comma-separated) to clear subtasks from"),
all: z.boolean().optional().describe("Clear subtasks from all tasks"),
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
}).refine(data => data.id || data.all, {
message: "Either 'id' or 'all' parameter must be provided",
path: ["id", "all"]
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);
await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await clearSubtasksDirect({
projectRoot: rootFolder,
...args
}, log, { reportProgress, mcpLog: log, session});
reportProgress({ progress: 100 });
if (result.success) {
log.info(`Subtasks cleared successfully: ${result.data.message}`);
} else {
log.error(`Failed to clear subtasks: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error clearing subtasks');
} catch (error) {
log.error(`Error in clearSubtasks tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
server.addTool({
name: 'clear_subtasks',
description: 'Clear subtasks from specified tasks',
parameters: z
.object({
id: z
.string()
.optional()
.describe('Task IDs (comma-separated) to clear subtasks from'),
all: z.boolean().optional().describe('Clear subtasks from all tasks'),
file: z
.string()
.optional()
.describe('Path to the tasks file (default: tasks/tasks.json)'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
})
.refine((data) => data.id || data.all, {
message: "Either 'id' or 'all' parameter must be provided",
path: ['id', 'all']
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);
await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await clearSubtasksDirect(
{
projectRoot: rootFolder,
...args
},
log,
{ reportProgress, mcpLog: log, session }
);
reportProgress({ progress: 100 });
if (result.success) {
log.info(`Subtasks cleared successfully: ${result.data.message}`);
} else {
log.error(`Failed to clear subtasks: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error clearing subtasks');
} catch (error) {
log.error(`Error in clearSubtasks tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}
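The refine constraint above is the only structural difference from the other tools, so a small isolated sketch shows its effect: parsing fails unless at least one of id or all is provided.

import { z } from 'zod';

const params = z
	.object({
		id: z.string().optional(),
		all: z.boolean().optional()
	})
	.refine((data) => data.id || data.all, {
		message: "Either 'id' or 'all' parameter must be provided",
		path: ['id', 'all']
	});

params.parse({ all: true }); // passes
params.parse({}); // throws a ZodError carrying the message above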

View File

@@ -3,56 +3,81 @@
* Tool for displaying the complexity analysis report
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { complexityReportDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { complexityReportDirect } from '../core/task-master-core.js';
/**
* Register the complexityReport tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerComplexityReportTool(server) {
server.addTool({
name: "complexity_report",
description: "Display the complexity analysis report in a readable format",
parameters: z.object({
file: z.string().optional().describe("Path to the report file (default: scripts/task-complexity-report.json)"),
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Getting complexity report with args: ${JSON.stringify(args)}`);
// await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await complexityReportDirect({
projectRoot: rootFolder,
...args
}, log/*, { reportProgress, mcpLog: log, session}*/);
// await reportProgress({ progress: 100 });
if (result.success) {
log.info(`Successfully retrieved complexity report${result.fromCache ? ' (from cache)' : ''}`);
} else {
log.error(`Failed to retrieve complexity report: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error retrieving complexity report');
} catch (error) {
log.error(`Error in complexity-report tool: ${error.message}`);
return createErrorResponse(`Failed to retrieve complexity report: ${error.message}`);
}
},
});
}
server.addTool({
name: 'complexity_report',
description: 'Display the complexity analysis report in a readable format',
parameters: z.object({
file: z
.string()
.optional()
.describe(
'Path to the report file (default: scripts/task-complexity-report.json)'
),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(
`Getting complexity report with args: ${JSON.stringify(args)}`
);
// await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await complexityReportDirect(
{
projectRoot: rootFolder,
...args
},
log /*, { reportProgress, mcpLog: log, session}*/
);
// await reportProgress({ progress: 100 });
if (result.success) {
log.info(
`Successfully retrieved complexity report${result.fromCache ? ' (from cache)' : ''}`
);
} else {
log.error(
`Failed to retrieve complexity report: ${result.error.message}`
);
}
return handleApiResult(
result,
log,
'Error retrieving complexity report'
);
} catch (error) {
log.error(`Error in complexity-report tool: ${error.message}`);
return createErrorResponse(
`Failed to retrieve complexity report: ${error.message}`
);
}
}
});
}

View File

@@ -3,57 +3,87 @@
* Tool for expanding all pending tasks with subtasks
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { expandAllTasksDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { expandAllTasksDirect } from '../core/task-master-core.js';
/**
* Register the expandAll tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerExpandAllTool(server) {
server.addTool({
name: "expand_all",
description: "Expand all pending tasks into subtasks",
parameters: z.object({
num: z.string().optional().describe("Number of subtasks to generate for each task"),
research: z.boolean().optional().describe("Enable Perplexity AI for research-backed subtask generation"),
prompt: z.string().optional().describe("Additional context to guide subtask generation"),
force: z.boolean().optional().describe("Force regeneration of subtasks for tasks that already have them"),
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
}),
execute: async (args, { log, session }) => {
try {
log.info(`Expanding all tasks with args: ${JSON.stringify(args)}`);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await expandAllTasksDirect({
projectRoot: rootFolder,
...args
}, log, { session });
if (result.success) {
log.info(`Successfully expanded all tasks: ${result.data.message}`);
} else {
log.error(`Failed to expand all tasks: ${result.error?.message || 'Unknown error'}`);
}
return handleApiResult(result, log, 'Error expanding all tasks');
} catch (error) {
log.error(`Error in expand-all tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
server.addTool({
name: 'expand_all',
description: 'Expand all pending tasks into subtasks',
parameters: z.object({
num: z
.string()
.optional()
.describe('Number of subtasks to generate for each task'),
research: z
.boolean()
.optional()
.describe(
'Enable Perplexity AI for research-backed subtask generation'
),
prompt: z
.string()
.optional()
.describe('Additional context to guide subtask generation'),
force: z
.boolean()
.optional()
.describe(
'Force regeneration of subtasks for tasks that already have them'
),
file: z
.string()
.optional()
.describe('Path to the tasks file (default: tasks/tasks.json)'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session }) => {
try {
log.info(`Expanding all tasks with args: ${JSON.stringify(args)}`);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await expandAllTasksDirect(
{
projectRoot: rootFolder,
...args
},
log,
{ session }
);
if (result.success) {
log.info(`Successfully expanded all tasks: ${result.data.message}`);
} else {
log.error(
`Failed to expand all tasks: ${result.error?.message || 'Unknown error'}`
);
}
return handleApiResult(result, log, 'Error expanding all tasks');
} catch (error) {
log.error(`Error in expand-all tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File: expand-task.js

@@ -3,75 +3,88 @@
* Tool to expand a task into subtasks
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { expandTaskDirect } from "../core/task-master-core.js";
import fs from "fs";
import path from "path";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { expandTaskDirect } from '../core/task-master-core.js';
import fs from 'fs';
import path from 'path';
/**
* Register the expand-task tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerExpandTaskTool(server) {
server.addTool({
name: "expand_task",
description: "Expand a task into subtasks for detailed implementation",
parameters: z.object({
id: z.string().describe("ID of task to expand"),
num: z.union([z.string(), z.number()]).optional().describe("Number of subtasks to generate"),
research: z.boolean().optional().describe("Use Perplexity AI for research-backed generation"),
prompt: z.string().optional().describe("Additional context for subtask generation"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.optional()
.describe(
"Root directory of the project (default: current working directory)"
),
}),
execute: async (args, { log, reportProgress, session }) => {
try {
log.info(`Starting expand-task with args: ${JSON.stringify(args)}`);
// Get project root from session
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
log.info(`Project root resolved to: ${rootFolder}`);
// Check for tasks.json in the standard locations
const tasksJsonPath = path.join(rootFolder, 'tasks', 'tasks.json');
if (fs.existsSync(tasksJsonPath)) {
log.info(`Found tasks.json at ${tasksJsonPath}`);
// Add the file parameter directly to args
args.file = tasksJsonPath;
} else {
log.warn(`Could not find tasks.json at ${tasksJsonPath}`);
}
// Call direct function with only session in the context, not reportProgress
// Use the pattern recommended in the MCP guidelines
const result = await expandTaskDirect({
...args,
projectRoot: rootFolder
}, log, { session }); // Only pass session, NOT reportProgress
// Return the result
return handleApiResult(result, log, 'Error expanding task');
} catch (error) {
log.error(`Error in expand task tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
server.addTool({
name: 'expand_task',
description: 'Expand a task into subtasks for detailed implementation',
parameters: z.object({
id: z.string().describe('ID of task to expand'),
num: z
.union([z.string(), z.number()])
.optional()
.describe('Number of subtasks to generate'),
research: z
.boolean()
.optional()
.describe('Use Perplexity AI for research-backed generation'),
prompt: z
.string()
.optional()
.describe('Additional context for subtask generation'),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, reportProgress, session }) => {
try {
log.info(`Starting expand-task with args: ${JSON.stringify(args)}`);
// Get project root from session
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
log.info(`Project root resolved to: ${rootFolder}`);
// Check for tasks.json in the standard locations
const tasksJsonPath = path.join(rootFolder, 'tasks', 'tasks.json');
if (fs.existsSync(tasksJsonPath)) {
log.info(`Found tasks.json at ${tasksJsonPath}`);
// Add the file parameter directly to args
args.file = tasksJsonPath;
} else {
log.warn(`Could not find tasks.json at ${tasksJsonPath}`);
}
// Call direct function with only session in the context, not reportProgress
// Use the pattern recommended in the MCP guidelines
const result = await expandTaskDirect(
{
...args,
projectRoot: rootFolder
},
log,
{ session }
); // Only pass session, NOT reportProgress
// Return the result
return handleApiResult(result, log, 'Error expanding task');
} catch (error) {
log.error(`Error in expand task tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File

@@ -3,56 +3,65 @@
* Tool for automatically fixing invalid task dependencies
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { fixDependenciesDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { fixDependenciesDirect } from '../core/task-master-core.js';
/**
* Register the fixDependencies tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerFixDependenciesTool(server) {
server.addTool({
name: "fix_dependencies",
description: "Fix invalid dependencies in tasks automatically",
parameters: z.object({
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Fixing dependencies with args: ${JSON.stringify(args)}`);
await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await fixDependenciesDirect({
projectRoot: rootFolder,
...args
}, log, { reportProgress, mcpLog: log, session});
await reportProgress({ progress: 100 });
if (result.success) {
log.info(`Successfully fixed dependencies: ${result.data.message}`);
} else {
log.error(`Failed to fix dependencies: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error fixing dependencies');
} catch (error) {
log.error(`Error in fixDependencies tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}
server.addTool({
name: 'fix_dependencies',
description: 'Fix invalid dependencies in tasks automatically',
parameters: z.object({
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Fixing dependencies with args: ${JSON.stringify(args)}`);
await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await fixDependenciesDirect(
{
projectRoot: rootFolder,
...args
},
log,
{ reportProgress, mcpLog: log, session }
);
await reportProgress({ progress: 100 });
if (result.success) {
log.info(`Successfully fixed dependencies: ${result.data.message}`);
} else {
log.error(`Failed to fix dependencies: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error fixing dependencies');
} catch (error) {
log.error(`Error in fixDependencies tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File: generate.js

@@ -3,62 +3,71 @@
* Tool to generate individual task files from tasks.json
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { generateTaskFilesDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { generateTaskFilesDirect } from '../core/task-master-core.js';
/**
* Register the generate tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerGenerateTool(server) {
server.addTool({
name: "generate",
description: "Generates individual task files in tasks/ directory based on tasks.json",
parameters: z.object({
file: z.string().optional().describe("Path to the tasks file"),
output: z.string().optional().describe("Output directory (default: same directory as tasks file)"),
projectRoot: z
.string()
.optional()
.describe(
"Root directory of the project (default: current working directory)"
),
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Generating task files with args: ${JSON.stringify(args)}`);
// await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await generateTaskFilesDirect({
projectRoot: rootFolder,
...args
}, log/*, { reportProgress, mcpLog: log, session}*/);
// await reportProgress({ progress: 100 });
if (result.success) {
log.info(`Successfully generated task files: ${result.data.message}`);
} else {
log.error(`Failed to generate task files: ${result.error?.message || 'Unknown error'}`);
}
return handleApiResult(result, log, 'Error generating task files');
} catch (error) {
log.error(`Error in generate tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
server.addTool({
name: 'generate',
description:
'Generates individual task files in tasks/ directory based on tasks.json',
parameters: z.object({
file: z.string().optional().describe('Path to the tasks file'),
output: z
.string()
.optional()
.describe('Output directory (default: same directory as tasks file)'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Generating task files with args: ${JSON.stringify(args)}`);
// await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await generateTaskFilesDirect(
{
projectRoot: rootFolder,
...args
},
log /*, { reportProgress, mcpLog: log, session}*/
);
// await reportProgress({ progress: 100 });
if (result.success) {
log.info(`Successfully generated task files: ${result.data.message}`);
} else {
log.error(
`Failed to generate task files: ${result.error?.message || 'Unknown error'}`
);
}
return handleApiResult(result, log, 'Error generating task files');
} catch (error) {
log.error(`Error in generate tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File: get-operation-status.js

@@ -8,35 +8,40 @@ import { createErrorResponse, createContentResponse } from './utils.js'; // Assu
* @param {AsyncOperationManager} asyncManager - The async operation manager.
*/
export function registerGetOperationStatusTool(server, asyncManager) {
server.addTool({
name: 'get_operation_status',
description: 'Retrieves the status and result/error of a background operation.',
parameters: z.object({
operationId: z.string().describe('The ID of the operation to check.'),
}),
execute: async (args, { log }) => {
try {
const { operationId } = args;
log.info(`Checking status for operation ID: ${operationId}`);
server.addTool({
name: 'get_operation_status',
description:
'Retrieves the status and result/error of a background operation.',
parameters: z.object({
operationId: z.string().describe('The ID of the operation to check.')
}),
execute: async (args, { log }) => {
try {
const { operationId } = args;
log.info(`Checking status for operation ID: ${operationId}`);
const status = asyncManager.getStatus(operationId);
const status = asyncManager.getStatus(operationId);
// Status will now always return an object, but it might have status='not_found'
if (status.status === 'not_found') {
log.warn(`Operation ID not found: ${operationId}`);
return createErrorResponse(
status.error?.message || `Operation ID not found: ${operationId}`,
status.error?.code || 'OPERATION_NOT_FOUND'
);
}
// Status will now always return an object, but it might have status='not_found'
if (status.status === 'not_found') {
log.warn(`Operation ID not found: ${operationId}`);
return createErrorResponse(
status.error?.message || `Operation ID not found: ${operationId}`,
status.error?.code || 'OPERATION_NOT_FOUND'
);
}
log.info(`Status for ${operationId}: ${status.status}`);
return createContentResponse(status);
} catch (error) {
log.error(`Error in get_operation_status tool: ${error.message}`, { stack: error.stack });
return createErrorResponse(`Failed to get operation status: ${error.message}`, 'GET_STATUS_ERROR');
}
},
});
}
log.info(`Status for ${operationId}: ${status.status}`);
return createContentResponse(status);
} catch (error) {
log.error(`Error in get_operation_status tool: ${error.message}`, {
stack: error.stack
});
return createErrorResponse(
`Failed to get operation status: ${error.message}`,
'GET_STATUS_ERROR'
);
}
}
});
}
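Note: the tool above only inspects `status.status`, `status.error.message` and `status.error.code` before returning the whole object via `createContentResponse`, and its description promises the operation's result or error. A completed lookup therefore presumably returns content shaped roughly like the sketch below; every field except `status` and `error` is an assumption, not taken from the repo.

// Hypothetical shape of a finished operation's status object
const exampleStatus = {
	status: 'completed', // 'not_found' is the only value handled specially above
	result: { message: 'Task added' }, // assumed payload from the background operation
	error: undefined
};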

View File: get-task.js

@@ -3,13 +3,13 @@
* Tool to get task details by ID
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { showTaskDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { showTaskDirect } from '../core/task-master-core.js';
/**
* Custom processor function that removes allTasks from the response
@@ -17,16 +17,16 @@ import { showTaskDirect } from "../core/task-master-core.js";
* @returns {Object} - The processed data with allTasks removed
*/
function processTaskResponse(data) {
if (!data) return data;
// If we have the expected structure with task and allTasks
if (data.task) {
// Return only the task object, removing the allTasks array
return data.task;
}
// If structure is unexpected, return as is
return data;
if (!data) return data;
// If we have the expected structure with task and allTasks
if (data.task) {
// Return only the task object, removing the allTasks array
return data.task;
}
// If structure is unexpected, return as is
return data;
}
/**
@@ -34,59 +34,75 @@ function processTaskResponse(data) {
* @param {Object} server - FastMCP server instance
*/
export function registerShowTaskTool(server) {
server.addTool({
name: "get_task",
description: "Get detailed information about a specific task",
parameters: z.object({
id: z.string().describe("Task ID to get"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.optional()
.describe(
"Root directory of the project (default: current working directory)"
),
}),
execute: async (args, { log, session, reportProgress }) => {
// Log the session right at the start of execute
log.info(`Session object received in execute: ${JSON.stringify(session)}`); // Use JSON.stringify for better visibility
server.addTool({
name: 'get_task',
description: 'Get detailed information about a specific task',
parameters: z.object({
id: z.string().describe('Task ID to get'),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session, reportProgress }) => {
// Log the session right at the start of execute
log.info(
`Session object received in execute: ${JSON.stringify(session)}`
); // Use JSON.stringify for better visibility
try {
log.info(`Getting task details for ID: ${args.id}`);
try {
log.info(`Getting task details for ID: ${args.id}`);
log.info(`Session object received in execute: ${JSON.stringify(session)}`); // Use JSON.stringify for better visibility
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
} else if (!rootFolder) {
// Ensure we always have *some* root, even if session failed and args didn't provide one
rootFolder = process.cwd();
log.warn(`Session and args failed to provide root, using CWD: ${rootFolder}`);
}
log.info(
`Session object received in execute: ${JSON.stringify(session)}`
); // Use JSON.stringify for better visibility
log.info(`Attempting to use project root: ${rootFolder}`); // Log the final resolved root
let rootFolder = getProjectRootFromSession(session, log);
log.info(`Root folder: ${rootFolder}`); // Log the final resolved root
const result = await showTaskDirect({
projectRoot: rootFolder,
...args
}, log);
if (result.success) {
log.info(`Successfully retrieved task details for ID: ${args.id}${result.fromCache ? ' (from cache)' : ''}`);
} else {
log.error(`Failed to get task: ${result.error.message}`);
}
// Use our custom processor function to remove allTasks from the response
return handleApiResult(result, log, 'Error retrieving task details', processTaskResponse);
} catch (error) {
log.error(`Error in get-task tool: ${error.message}\n${error.stack}`); // Add stack trace
return createErrorResponse(`Failed to get task: ${error.message}`);
}
},
});
}
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
} else if (!rootFolder) {
// Ensure we always have *some* root, even if session failed and args didn't provide one
rootFolder = process.cwd();
log.warn(
`Session and args failed to provide root, using CWD: ${rootFolder}`
);
}
log.info(`Attempting to use project root: ${rootFolder}`); // Log the final resolved root
log.info(`Root folder: ${rootFolder}`); // Log the final resolved root
const result = await showTaskDirect(
{
projectRoot: rootFolder,
...args
},
log
);
if (result.success) {
log.info(
`Successfully retrieved task details for ID: ${args.id}${result.fromCache ? ' (from cache)' : ''}`
);
} else {
log.error(`Failed to get task: ${result.error.message}`);
}
// Use our custom processor function to remove allTasks from the response
return handleApiResult(
result,
log,
'Error retrieving task details',
processTaskResponse
);
} catch (error) {
log.error(`Error in get-task tool: ${error.message}\n${error.stack}`); // Add stack trace
return createErrorResponse(`Failed to get task: ${error.message}`);
}
}
});
}
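Note: a quick illustration of what `processTaskResponse` does with the direct-function payload (the task data here is made up):

// Hypothetical input: the direct function returns the task plus the full task list
const data = {
	task: { id: '5', title: 'Example task', status: 'pending' },
	allTasks: [] // every task in tasks.json; stripped from the MCP response
};
processTaskResponse(data); // => { id: '5', title: 'Example task', status: 'pending' }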

View File: get-tasks.js

@@ -3,63 +3,79 @@
* Tool to get all tasks from Task Master
*/
import { z } from "zod";
import { z } from 'zod';
import {
createErrorResponse,
handleApiResult,
getProjectRootFromSession
} from "./utils.js";
import { listTasksDirect } from "../core/task-master-core.js";
createErrorResponse,
handleApiResult,
getProjectRootFromSession
} from './utils.js';
import { listTasksDirect } from '../core/task-master-core.js';
/**
* Register the getTasks tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerListTasksTool(server) {
server.addTool({
name: "get_tasks",
description: "Get all tasks from Task Master, optionally filtering by status and including subtasks.",
parameters: z.object({
status: z.string().optional().describe("Filter tasks by status (e.g., 'pending', 'done')"),
withSubtasks: z
.boolean()
.optional()
.describe("Include subtasks nested within their parent tasks in the response"),
file: z.string().optional().describe("Path to the tasks file (relative to project root or absolute)"),
projectRoot: z
.string()
.optional()
.describe(
"Root directory of the project (default: automatically detected from session or CWD)"
),
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Getting tasks with filters: ${JSON.stringify(args)}`);
// await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await listTasksDirect({
projectRoot: rootFolder,
...args
}, log/*, { reportProgress, mcpLog: log, session}*/);
// await reportProgress({ progress: 100 });
log.info(`Retrieved ${result.success ? (result.data?.tasks?.length || 0) : 0} tasks${result.fromCache ? ' (from cache)' : ''}`);
return handleApiResult(result, log, 'Error getting tasks');
} catch (error) {
log.error(`Error getting tasks: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
server.addTool({
name: 'get_tasks',
description:
'Get all tasks from Task Master, optionally filtering by status and including subtasks.',
parameters: z.object({
status: z
.string()
.optional()
.describe("Filter tasks by status (e.g., 'pending', 'done')"),
withSubtasks: z
.boolean()
.optional()
.describe(
'Include subtasks nested within their parent tasks in the response'
),
file: z
.string()
.optional()
.describe(
'Path to the tasks file (relative to project root or absolute)'
),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: automatically detected from session or CWD)'
)
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Getting tasks with filters: ${JSON.stringify(args)}`);
// await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await listTasksDirect(
{
projectRoot: rootFolder,
...args
},
log /*, { reportProgress, mcpLog: log, session}*/
);
// await reportProgress({ progress: 100 });
log.info(
`Retrieved ${result.success ? result.data?.tasks?.length || 0 : 0} tasks${result.fromCache ? ' (from cache)' : ''}`
);
return handleApiResult(result, log, 'Error getting tasks');
} catch (error) {
log.error(`Error getting tasks: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}
// We no longer need the formatTasksResponse function as we're returning raw JSON data

View File: index.js

@@ -3,28 +3,28 @@
* Export all Task Master CLI tools for MCP server
*/
import { registerListTasksTool } from "./get-tasks.js";
import logger from "../logger.js";
import { registerSetTaskStatusTool } from "./set-task-status.js";
import { registerParsePRDTool } from "./parse-prd.js";
import { registerUpdateTool } from "./update.js";
import { registerUpdateTaskTool } from "./update-task.js";
import { registerUpdateSubtaskTool } from "./update-subtask.js";
import { registerGenerateTool } from "./generate.js";
import { registerShowTaskTool } from "./get-task.js";
import { registerNextTaskTool } from "./next-task.js";
import { registerExpandTaskTool } from "./expand-task.js";
import { registerAddTaskTool } from "./add-task.js";
import { registerAddSubtaskTool } from "./add-subtask.js";
import { registerRemoveSubtaskTool } from "./remove-subtask.js";
import { registerAnalyzeTool } from "./analyze.js";
import { registerClearSubtasksTool } from "./clear-subtasks.js";
import { registerExpandAllTool } from "./expand-all.js";
import { registerRemoveDependencyTool } from "./remove-dependency.js";
import { registerValidateDependenciesTool } from "./validate-dependencies.js";
import { registerFixDependenciesTool } from "./fix-dependencies.js";
import { registerComplexityReportTool } from "./complexity-report.js";
import { registerAddDependencyTool } from "./add-dependency.js";
import { registerListTasksTool } from './get-tasks.js';
import logger from '../logger.js';
import { registerSetTaskStatusTool } from './set-task-status.js';
import { registerParsePRDTool } from './parse-prd.js';
import { registerUpdateTool } from './update.js';
import { registerUpdateTaskTool } from './update-task.js';
import { registerUpdateSubtaskTool } from './update-subtask.js';
import { registerGenerateTool } from './generate.js';
import { registerShowTaskTool } from './get-task.js';
import { registerNextTaskTool } from './next-task.js';
import { registerExpandTaskTool } from './expand-task.js';
import { registerAddTaskTool } from './add-task.js';
import { registerAddSubtaskTool } from './add-subtask.js';
import { registerRemoveSubtaskTool } from './remove-subtask.js';
import { registerAnalyzeTool } from './analyze.js';
import { registerClearSubtasksTool } from './clear-subtasks.js';
import { registerExpandAllTool } from './expand-all.js';
import { registerRemoveDependencyTool } from './remove-dependency.js';
import { registerValidateDependenciesTool } from './validate-dependencies.js';
import { registerFixDependenciesTool } from './fix-dependencies.js';
import { registerComplexityReportTool } from './complexity-report.js';
import { registerAddDependencyTool } from './add-dependency.js';
import { registerRemoveTaskTool } from './remove-task.js';
import { registerInitializeProjectTool } from './initialize-project.js';
import { asyncOperationManager } from '../core/utils/async-manager.js';
@@ -34,40 +34,40 @@ import { asyncOperationManager } from '../core/utils/async-manager.js';
* @param {Object} server - FastMCP server instance
* @param {asyncOperationManager} asyncManager - The async operation manager instance
*/
export function registerTaskMasterTools(server, asyncManager) {
try {
// Register each tool
registerListTasksTool(server);
registerSetTaskStatusTool(server);
registerParsePRDTool(server);
registerUpdateTool(server);
registerUpdateTaskTool(server);
registerUpdateSubtaskTool(server);
registerGenerateTool(server);
registerShowTaskTool(server);
registerNextTaskTool(server);
registerExpandTaskTool(server);
registerAddTaskTool(server, asyncManager);
registerAddSubtaskTool(server);
registerRemoveSubtaskTool(server);
registerAnalyzeTool(server);
registerClearSubtasksTool(server);
registerExpandAllTool(server);
registerRemoveDependencyTool(server);
registerValidateDependenciesTool(server);
registerFixDependenciesTool(server);
registerComplexityReportTool(server);
registerAddDependencyTool(server);
registerRemoveTaskTool(server);
registerInitializeProjectTool(server);
} catch (error) {
logger.error(`Error registering Task Master tools: ${error.message}`);
throw error;
}
export function registerTaskMasterTools(server, asyncManager) {
try {
// Register each tool
registerListTasksTool(server);
registerSetTaskStatusTool(server);
registerParsePRDTool(server);
registerUpdateTool(server);
registerUpdateTaskTool(server);
registerUpdateSubtaskTool(server);
registerGenerateTool(server);
registerShowTaskTool(server);
registerNextTaskTool(server);
registerExpandTaskTool(server);
registerAddTaskTool(server, asyncManager);
registerAddSubtaskTool(server);
registerRemoveSubtaskTool(server);
registerAnalyzeTool(server);
registerClearSubtasksTool(server);
registerExpandAllTool(server);
registerRemoveDependencyTool(server);
registerValidateDependenciesTool(server);
registerFixDependenciesTool(server);
registerComplexityReportTool(server);
registerAddDependencyTool(server);
registerRemoveTaskTool(server);
registerInitializeProjectTool(server);
} catch (error) {
logger.error(`Error registering Task Master tools: ${error.message}`);
throw error;
}
logger.info('Registered Task Master MCP tools');
logger.info('Registered Task Master MCP tools');
}
export default {
registerTaskMasterTools,
};
registerTaskMasterTools
};
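Note: for context, a minimal sketch of how this registration function is typically wired into the server entry point, assuming the `fastmcp` package's `FastMCP` class and a stdio transport; the import paths and server options are assumptions, and the real `mcp-server/server.js` may differ.

// Illustrative startup wiring only, not the actual server.js
import { FastMCP } from 'fastmcp';
import { registerTaskMasterTools } from './tools/index.js';
import { asyncOperationManager } from './core/utils/async-manager.js';

const server = new FastMCP({ name: 'taskmaster-ai', version: '1.0.0' });
registerTaskMasterTools(server, asyncOperationManager);
await server.start({ transportType: 'stdio' });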

View File: initialize-project.js

@@ -1,62 +1,99 @@
import { z } from "zod";
import { z } from 'zod';
import { execSync } from 'child_process';
import { createContentResponse, createErrorResponse } from "./utils.js"; // Only need response creators
import { createContentResponse, createErrorResponse } from './utils.js'; // Only need response creators
export function registerInitializeProjectTool(server) {
server.addTool({
name: "initialize_project", // snake_case for tool name
description: "Initializes a new Task Master project structure in the current working directory by running 'task-master init'.",
parameters: z.object({
projectName: z.string().optional().describe("The name for the new project."),
projectDescription: z.string().optional().describe("A brief description for the project."),
projectVersion: z.string().optional().describe("The initial version for the project (e.g., '0.1.0')."),
authorName: z.string().optional().describe("The author's name."),
skipInstall: z.boolean().optional().default(false).describe("Skip installing dependencies automatically."),
addAliases: z.boolean().optional().default(false).describe("Add shell aliases (tm, taskmaster) to shell config file."),
yes: z.boolean().optional().default(false).describe("Skip prompts and use default values or provided arguments."),
// projectRoot is not needed here as 'init' works on the current directory
}),
execute: async (args, { log }) => { // Destructure context to get log
try {
log.info(`Executing initialize_project with args: ${JSON.stringify(args)}`);
server.addTool({
name: 'initialize_project', // snake_case for tool name
description:
"Initializes a new Task Master project structure in the current working directory by running 'task-master init'.",
parameters: z.object({
projectName: z
.string()
.optional()
.describe('The name for the new project.'),
projectDescription: z
.string()
.optional()
.describe('A brief description for the project.'),
projectVersion: z
.string()
.optional()
.describe("The initial version for the project (e.g., '0.1.0')."),
authorName: z.string().optional().describe("The author's name."),
skipInstall: z
.boolean()
.optional()
.default(false)
.describe('Skip installing dependencies automatically.'),
addAliases: z
.boolean()
.optional()
.default(false)
.describe('Add shell aliases (tm, taskmaster) to shell config file.'),
yes: z
.boolean()
.optional()
.default(false)
.describe('Skip prompts and use default values or provided arguments.')
// projectRoot is not needed here as 'init' works on the current directory
}),
execute: async (args, { log }) => {
// Destructure context to get log
try {
log.info(
`Executing initialize_project with args: ${JSON.stringify(args)}`
);
// Construct the command arguments carefully
// Using npx ensures it uses the locally installed version if available, or fetches it
let command = 'npx task-master init';
const cliArgs = [];
if (args.projectName) cliArgs.push(`--name "${args.projectName.replace(/"/g, '\\"')}"`); // Escape quotes
if (args.projectDescription) cliArgs.push(`--description "${args.projectDescription.replace(/"/g, '\\"')}"`);
if (args.projectVersion) cliArgs.push(`--version "${args.projectVersion.replace(/"/g, '\\"')}"`);
if (args.authorName) cliArgs.push(`--author "${args.authorName.replace(/"/g, '\\"')}"`);
if (args.skipInstall) cliArgs.push('--skip-install');
if (args.addAliases) cliArgs.push('--aliases');
if (args.yes) cliArgs.push('--yes');
// Construct the command arguments carefully
// Using npx ensures it uses the locally installed version if available, or fetches it
let command = 'npx task-master init';
const cliArgs = [];
if (args.projectName)
cliArgs.push(`--name "${args.projectName.replace(/"/g, '\\"')}"`); // Escape quotes
if (args.projectDescription)
cliArgs.push(
`--description "${args.projectDescription.replace(/"/g, '\\"')}"`
);
if (args.projectVersion)
cliArgs.push(
`--version "${args.projectVersion.replace(/"/g, '\\"')}"`
);
if (args.authorName)
cliArgs.push(`--author "${args.authorName.replace(/"/g, '\\"')}"`);
if (args.skipInstall) cliArgs.push('--skip-install');
if (args.addAliases) cliArgs.push('--aliases');
if (args.yes) cliArgs.push('--yes');
command += ' ' + cliArgs.join(' ');
command += ' ' + cliArgs.join(' ');
log.info(`Constructed command: ${command}`);
log.info(`Constructed command: ${command}`);
// Execute the command in the current working directory of the server process
// Capture stdout/stderr. Use a reasonable timeout (e.g., 5 minutes)
const output = execSync(command, { encoding: 'utf8', stdio: 'pipe', timeout: 300000 });
// Execute the command in the current working directory of the server process
// Capture stdout/stderr. Use a reasonable timeout (e.g., 5 minutes)
const output = execSync(command, {
encoding: 'utf8',
stdio: 'pipe',
timeout: 300000
});
log.info(`Initialization output:\n${output}`);
log.info(`Initialization output:\n${output}`);
// Return a standard success response manually
return createContentResponse(
"Project initialized successfully.",
{ output: output } // Include output in the data payload
);
// Return a standard success response manually
return createContentResponse(
'Project initialized successfully.',
{ output: output } // Include output in the data payload
);
} catch (error) {
// Catch errors from execSync or timeouts
const errorMessage = `Project initialization failed: ${error.message}`;
const errorDetails =
error.stderr?.toString() || error.stdout?.toString() || error.message; // Provide stderr/stdout if available
log.error(`${errorMessage}\nDetails: ${errorDetails}`);
} catch (error) {
// Catch errors from execSync or timeouts
const errorMessage = `Project initialization failed: ${error.message}`;
const errorDetails = error.stderr?.toString() || error.stdout?.toString() || error.message; // Provide stderr/stdout if available
log.error(`${errorMessage}\nDetails: ${errorDetails}`);
// Return a standard error response manually
return createErrorResponse(errorMessage, { details: errorDetails });
}
}
});
}
// Return a standard error response manually
return createErrorResponse(errorMessage, { details: errorDetails });
}
}
});
}
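Note: to make the command construction above concrete, args of `{ projectName: 'My App', addAliases: true, yes: true }` would produce roughly the following command (any double quotes inside values are backslash-escaped first):

// Constructed command for the example args
// npx task-master init --name "My App" --aliases --yes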

View File: next-task.js

@@ -3,61 +3,69 @@
* Tool to find the next task to work on
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { nextTaskDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { nextTaskDirect } from '../core/task-master-core.js';
/**
* Register the next-task tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerNextTaskTool(server) {
server.addTool({
name: "next_task",
description: "Find the next task to work on based on dependencies and status",
parameters: z.object({
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.optional()
.describe(
"Root directory of the project (default: current working directory)"
),
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Finding next task with args: ${JSON.stringify(args)}`);
// await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await nextTaskDirect({
projectRoot: rootFolder,
...args
}, log/*, { reportProgress, mcpLog: log, session}*/);
// await reportProgress({ progress: 100 });
if (result.success) {
log.info(`Successfully found next task: ${result.data?.task?.id || 'No available tasks'}`);
} else {
log.error(`Failed to find next task: ${result.error?.message || 'Unknown error'}`);
}
return handleApiResult(result, log, 'Error finding next task');
} catch (error) {
log.error(`Error in nextTask tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
server.addTool({
name: 'next_task',
description:
'Find the next task to work on based on dependencies and status',
parameters: z.object({
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Finding next task with args: ${JSON.stringify(args)}`);
// await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await nextTaskDirect(
{
projectRoot: rootFolder,
...args
},
log /*, { reportProgress, mcpLog: log, session}*/
);
// await reportProgress({ progress: 100 });
if (result.success) {
log.info(
`Successfully found next task: ${result.data?.task?.id || 'No available tasks'}`
);
} else {
log.error(
`Failed to find next task: ${result.error?.message || 'Unknown error'}`
);
}
return handleApiResult(result, log, 'Error finding next task');
} catch (error) {
log.error(`Error in nextTask tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File: parse-prd.js

@@ -3,61 +3,86 @@
* Tool to parse PRD document and generate tasks
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { parsePRDDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { parsePRDDirect } from '../core/task-master-core.js';
/**
* Register the parsePRD tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerParsePRDTool(server) {
server.addTool({
name: "parse_prd",
description: "Parse a Product Requirements Document (PRD) or text file to automatically generate initial tasks.",
parameters: z.object({
input: z.string().default("tasks/tasks.json").describe("Path to the PRD document file (relative to project root or absolute)"),
numTasks: z.string().optional().describe("Approximate number of top-level tasks to generate (default: 10)"),
output: z.string().optional().describe("Output path for tasks.json file (relative to project root or absolute, default: tasks/tasks.json)"),
force: z.boolean().optional().describe("Allow overwriting an existing tasks.json file."),
projectRoot: z
.string()
.optional()
.describe(
"Root directory of the project (default: automatically detected from session or CWD)"
),
}),
execute: async (args, { log, session }) => {
try {
log.info(`Parsing PRD with args: ${JSON.stringify(args)}`);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await parsePRDDirect({
projectRoot: rootFolder,
...args
}, log, { session });
if (result.success) {
log.info(`Successfully parsed PRD: ${result.data.message}`);
} else {
log.error(`Failed to parse PRD: ${result.error?.message || 'Unknown error'}`);
}
return handleApiResult(result, log, 'Error parsing PRD');
} catch (error) {
log.error(`Error in parse-prd tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
server.addTool({
name: 'parse_prd',
description:
'Parse a Product Requirements Document (PRD) or text file to automatically generate initial tasks.',
parameters: z.object({
input: z
.string()
.default('tasks/tasks.json')
.describe(
'Path to the PRD document file (relative to project root or absolute)'
),
numTasks: z
.string()
.optional()
.describe(
'Approximate number of top-level tasks to generate (default: 10)'
),
output: z
.string()
.optional()
.describe(
'Output path for tasks.json file (relative to project root or absolute, default: tasks/tasks.json)'
),
force: z
.boolean()
.optional()
.describe('Allow overwriting an existing tasks.json file.'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: automatically detected from session or CWD)'
)
}),
execute: async (args, { log, session }) => {
try {
log.info(`Parsing PRD with args: ${JSON.stringify(args)}`);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await parsePRDDirect(
{
projectRoot: rootFolder,
...args
},
log,
{ session }
);
if (result.success) {
log.info(`Successfully parsed PRD: ${result.data.message}`);
} else {
log.error(
`Failed to parse PRD: ${result.error?.message || 'Unknown error'}`
);
}
return handleApiResult(result, log, 'Error parsing PRD');
} catch (error) {
log.error(`Error in parse-prd tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File: remove-dependency.js

@@ -3,58 +3,71 @@
* Tool for removing a dependency from a task
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { removeDependencyDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { removeDependencyDirect } from '../core/task-master-core.js';
/**
* Register the removeDependency tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerRemoveDependencyTool(server) {
server.addTool({
name: "remove_dependency",
description: "Remove a dependency from a task",
parameters: z.object({
id: z.string().describe("Task ID to remove dependency from"),
dependsOn: z.string().describe("Task ID to remove as a dependency"),
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Removing dependency for task ${args.id} from ${args.dependsOn} with args: ${JSON.stringify(args)}`);
// await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await removeDependencyDirect({
projectRoot: rootFolder,
...args
}, log/*, { reportProgress, mcpLog: log, session}*/);
// await reportProgress({ progress: 100 });
if (result.success) {
log.info(`Successfully removed dependency: ${result.data.message}`);
} else {
log.error(`Failed to remove dependency: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error removing dependency');
} catch (error) {
log.error(`Error in removeDependency tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}
server.addTool({
name: 'remove_dependency',
description: 'Remove a dependency from a task',
parameters: z.object({
id: z.string().describe('Task ID to remove dependency from'),
dependsOn: z.string().describe('Task ID to remove as a dependency'),
file: z
.string()
.optional()
.describe('Path to the tasks file (default: tasks/tasks.json)'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(
`Removing dependency for task ${args.id} from ${args.dependsOn} with args: ${JSON.stringify(args)}`
);
// await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await removeDependencyDirect(
{
projectRoot: rootFolder,
...args
},
log /*, { reportProgress, mcpLog: log, session}*/
);
// await reportProgress({ progress: 100 });
if (result.success) {
log.info(`Successfully removed dependency: ${result.data.message}`);
} else {
log.error(`Failed to remove dependency: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error removing dependency');
} catch (error) {
log.error(`Error in removeDependency tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File: remove-subtask.js

@@ -3,59 +3,82 @@
* Tool for removing subtasks from parent tasks
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { removeSubtaskDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { removeSubtaskDirect } from '../core/task-master-core.js';
/**
* Register the removeSubtask tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerRemoveSubtaskTool(server) {
server.addTool({
name: "remove_subtask",
description: "Remove a subtask from its parent task",
parameters: z.object({
id: z.string().describe("Subtask ID to remove in format 'parentId.subtaskId' (required)"),
convert: z.boolean().optional().describe("Convert the subtask to a standalone task instead of deleting it"),
file: z.string().optional().describe("Path to the tasks file (default: tasks/tasks.json)"),
skipGenerate: z.boolean().optional().describe("Skip regenerating task files"),
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Removing subtask with args: ${JSON.stringify(args)}`);
// await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await removeSubtaskDirect({
projectRoot: rootFolder,
...args
}, log/*, { reportProgress, mcpLog: log, session}*/);
// await reportProgress({ progress: 100 });
if (result.success) {
log.info(`Subtask removed successfully: ${result.data.message}`);
} else {
log.error(`Failed to remove subtask: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error removing subtask');
} catch (error) {
log.error(`Error in removeSubtask tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
server.addTool({
name: 'remove_subtask',
description: 'Remove a subtask from its parent task',
parameters: z.object({
id: z
.string()
.describe(
"Subtask ID to remove in format 'parentId.subtaskId' (required)"
),
convert: z
.boolean()
.optional()
.describe(
'Convert the subtask to a standalone task instead of deleting it'
),
file: z
.string()
.optional()
.describe('Path to the tasks file (default: tasks/tasks.json)'),
skipGenerate: z
.boolean()
.optional()
.describe('Skip regenerating task files'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Removing subtask with args: ${JSON.stringify(args)}`);
// await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await removeSubtaskDirect(
{
projectRoot: rootFolder,
...args
},
log /*, { reportProgress, mcpLog: log, session}*/
);
// await reportProgress({ progress: 100 });
if (result.success) {
log.info(`Subtask removed successfully: ${result.data.message}`);
} else {
log.error(`Failed to remove subtask: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error removing subtask');
} catch (error) {
log.error(`Error in removeSubtask tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File: remove-task.js

@@ -3,69 +3,79 @@
* Tool to remove a task by ID
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { removeTaskDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { removeTaskDirect } from '../core/task-master-core.js';
/**
* Register the remove-task tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerRemoveTaskTool(server) {
server.addTool({
name: "remove_task",
description: "Remove a task or subtask permanently from the tasks list",
parameters: z.object({
id: z.string().describe("ID of the task or subtask to remove (e.g., '5' or '5.2')"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.optional()
.describe(
"Root directory of the project (default: current working directory)"
),
confirm: z.boolean().optional().describe("Whether to skip confirmation prompt (default: false)")
}),
execute: async (args, { log, session }) => {
try {
log.info(`Removing task with ID: ${args.id}`);
// Get project root from session
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
} else if (!rootFolder) {
// Ensure we have a default if nothing else works
rootFolder = process.cwd();
log.warn(`Session and args failed to provide root, using CWD: ${rootFolder}`);
}
log.info(`Using project root: ${rootFolder}`);
// Assume client has already handled confirmation if needed
const result = await removeTaskDirect({
id: args.id,
file: args.file,
projectRoot: rootFolder
}, log);
if (result.success) {
log.info(`Successfully removed task: ${args.id}`);
} else {
log.error(`Failed to remove task: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error removing task');
} catch (error) {
log.error(`Error in remove-task tool: ${error.message}`);
return createErrorResponse(`Failed to remove task: ${error.message}`);
}
},
});
}
server.addTool({
name: 'remove_task',
description: 'Remove a task or subtask permanently from the tasks list',
parameters: z.object({
id: z
.string()
.describe("ID of the task or subtask to remove (e.g., '5' or '5.2')"),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
),
confirm: z
.boolean()
.optional()
.describe('Whether to skip confirmation prompt (default: false)')
}),
execute: async (args, { log, session }) => {
try {
log.info(`Removing task with ID: ${args.id}`);
// Get project root from session
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
} else if (!rootFolder) {
// Ensure we have a default if nothing else works
rootFolder = process.cwd();
log.warn(
`Session and args failed to provide root, using CWD: ${rootFolder}`
);
}
log.info(`Using project root: ${rootFolder}`);
// Assume client has already handled confirmation if needed
const result = await removeTaskDirect(
{
id: args.id,
file: args.file,
projectRoot: rootFolder
},
log
);
if (result.success) {
log.info(`Successfully removed task: ${args.id}`);
} else {
log.error(`Failed to remove task: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error removing task');
} catch (error) {
log.error(`Error in remove-task tool: ${error.message}`);
return createErrorResponse(`Failed to remove task: ${error.message}`);
}
}
});
}

View File: set-task-status.js

@@ -3,68 +3,81 @@
* Tool to set the status of a task
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { setTaskStatusDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { setTaskStatusDirect } from '../core/task-master-core.js';
/**
* Register the setTaskStatus tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerSetTaskStatusTool(server) {
server.addTool({
name: "set_task_status",
description: "Set the status of one or more tasks or subtasks.",
parameters: z.object({
id: z
.string()
.describe("Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated for multiple updates."),
status: z
.string()
.describe("New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'."),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.optional()
.describe(
"Root directory of the project (default: automatically detected)"
),
}),
execute: async (args, { log, session }) => {
try {
log.info(`Setting status of task(s) ${args.id} to: ${args.status}`);
// Get project root from session
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
// Call the direct function with the project root
const result = await setTaskStatusDirect({
...args,
projectRoot: rootFolder
}, log);
// Log the result
if (result.success) {
log.info(`Successfully updated status for task(s) ${args.id} to "${args.status}": ${result.data.message}`);
} else {
log.error(`Failed to update task status: ${result.error?.message || 'Unknown error'}`);
}
// Format and return the result
return handleApiResult(result, log, 'Error setting task status');
} catch (error) {
log.error(`Error in setTaskStatus tool: ${error.message}`);
return createErrorResponse(`Error setting task status: ${error.message}`);
}
},
});
server.addTool({
name: 'set_task_status',
description: 'Set the status of one or more tasks or subtasks.',
parameters: z.object({
id: z
.string()
.describe(
"Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated for multiple updates."
),
status: z
.string()
.describe(
"New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'."
),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: automatically detected)'
)
}),
execute: async (args, { log, session }) => {
try {
log.info(`Setting status of task(s) ${args.id} to: ${args.status}`);
// Get project root from session
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
// Call the direct function with the project root
const result = await setTaskStatusDirect(
{
...args,
projectRoot: rootFolder
},
log
);
// Log the result
if (result.success) {
log.info(
`Successfully updated status for task(s) ${args.id} to "${args.status}": ${result.data.message}`
);
} else {
log.error(
`Failed to update task status: ${result.error?.message || 'Unknown error'}`
);
}
// Format and return the result
return handleApiResult(result, log, 'Error setting task status');
} catch (error) {
log.error(`Error in setTaskStatus tool: ${error.message}`);
return createErrorResponse(
`Error setting task status: ${error.message}`
);
}
}
});
}

View File: update-subtask.js

@@ -3,61 +3,75 @@
* Tool to append additional information to a specific subtask
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { updateSubtaskByIdDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { updateSubtaskByIdDirect } from '../core/task-master-core.js';
/**
* Register the update-subtask tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerUpdateSubtaskTool(server) {
server.addTool({
name: "update_subtask",
description: "Appends additional information to a specific subtask without replacing existing content",
parameters: z.object({
id: z.string().describe("ID of the subtask to update in format \"parentId.subtaskId\" (e.g., \"5.2\")"),
prompt: z.string().describe("Information to add to the subtask"),
research: z.boolean().optional().describe("Use Perplexity AI for research-backed updates"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.optional()
.describe(
"Root directory of the project (default: current working directory)"
),
}),
execute: async (args, { log, session }) => {
try {
log.info(`Updating subtask with args: ${JSON.stringify(args)}`);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await updateSubtaskByIdDirect({
projectRoot: rootFolder,
...args
}, log, { session });
if (result.success) {
log.info(`Successfully updated subtask with ID ${args.id}`);
} else {
log.error(`Failed to update subtask: ${result.error?.message || 'Unknown error'}`);
}
return handleApiResult(result, log, 'Error updating subtask');
} catch (error) {
log.error(`Error in update_subtask tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
server.addTool({
name: 'update_subtask',
description:
'Appends additional information to a specific subtask without replacing existing content',
parameters: z.object({
id: z
.string()
.describe(
'ID of the subtask to update in format "parentId.subtaskId" (e.g., "5.2")'
),
prompt: z.string().describe('Information to add to the subtask'),
research: z
.boolean()
.optional()
.describe('Use Perplexity AI for research-backed updates'),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session }) => {
try {
log.info(`Updating subtask with args: ${JSON.stringify(args)}`);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await updateSubtaskByIdDirect(
{
projectRoot: rootFolder,
...args
},
log,
{ session }
);
if (result.success) {
log.info(`Successfully updated subtask with ID ${args.id}`);
} else {
log.error(
`Failed to update subtask: ${result.error?.message || 'Unknown error'}`
);
}
return handleApiResult(result, log, 'Error updating subtask');
} catch (error) {
log.error(`Error in update_subtask tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File

@@ -3,61 +3,75 @@
* Tool to update a single task by ID with new information
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { updateTaskByIdDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { updateTaskByIdDirect } from '../core/task-master-core.js';
/**
* Register the update-task tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerUpdateTaskTool(server) {
server.addTool({
name: "update_task",
description: "Updates a single task by ID with new information or context provided in the prompt.",
parameters: z.object({
id: z.string().describe("ID of the task or subtask (e.g., '15', '15.2') to update"),
prompt: z.string().describe("New information or context to incorporate into the task"),
research: z.boolean().optional().describe("Use Perplexity AI for research-backed updates"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.optional()
.describe(
"Root directory of the project (default: current working directory)"
),
}),
execute: async (args, { log, session }) => {
try {
log.info(`Updating task with args: ${JSON.stringify(args)}`);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await updateTaskByIdDirect({
projectRoot: rootFolder,
...args
}, log, { session });
if (result.success) {
log.info(`Successfully updated task with ID ${args.id}`);
} else {
log.error(`Failed to update task: ${result.error?.message || 'Unknown error'}`);
}
return handleApiResult(result, log, 'Error updating task');
} catch (error) {
log.error(`Error in update_task tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
server.addTool({
name: 'update_task',
description:
'Updates a single task by ID with new information or context provided in the prompt.',
parameters: z.object({
id: z
.string()
.describe("ID of the task or subtask (e.g., '15', '15.2') to update"),
prompt: z
.string()
.describe('New information or context to incorporate into the task'),
research: z
.boolean()
.optional()
.describe('Use Perplexity AI for research-backed updates'),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session }) => {
try {
log.info(`Updating task with args: ${JSON.stringify(args)}`);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await updateTaskByIdDirect(
{
projectRoot: rootFolder,
...args
},
log,
{ session }
);
if (result.success) {
log.info(`Successfully updated task with ID ${args.id}`);
} else {
log.error(
`Failed to update task: ${result.error?.message || 'Unknown error'}`
);
}
return handleApiResult(result, log, 'Error updating task');
} catch (error) {
log.error(`Error in update_task tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File

@@ -3,61 +3,79 @@
* Tool to update tasks based on new context/prompt
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { updateTasksDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { updateTasksDirect } from '../core/task-master-core.js';
/**
* Register the update tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerUpdateTool(server) {
server.addTool({
name: "update",
description: "Update multiple upcoming tasks (with ID >= 'from' ID) based on new context or changes provided in the prompt. Use 'update_task' instead for a single specific task.",
parameters: z.object({
from: z.string().describe("Task ID from which to start updating (inclusive). IMPORTANT: This tool uses 'from', not 'id'"),
prompt: z.string().describe("Explanation of changes or new context to apply"),
research: z.boolean().optional().describe("Use Perplexity AI for research-backed updates"),
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z
.string()
.optional()
.describe(
"Root directory of the project (default: current working directory)"
),
}),
execute: async (args, { log, session }) => {
try {
log.info(`Updating tasks with args: ${JSON.stringify(args)}`);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await updateTasksDirect({
projectRoot: rootFolder,
...args
}, log, { session });
if (result.success) {
log.info(`Successfully updated tasks from ID ${args.from}: ${result.data.message}`);
} else {
log.error(`Failed to update tasks: ${result.error?.message || 'Unknown error'}`);
}
return handleApiResult(result, log, 'Error updating tasks');
} catch (error) {
log.error(`Error in update tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
server.addTool({
name: 'update',
description:
"Update multiple upcoming tasks (with ID >= 'from' ID) based on new context or changes provided in the prompt. Use 'update_task' instead for a single specific task.",
parameters: z.object({
from: z
.string()
.describe(
"Task ID from which to start updating (inclusive). IMPORTANT: This tool uses 'from', not 'id'"
),
prompt: z
.string()
.describe('Explanation of changes or new context to apply'),
research: z
.boolean()
.optional()
.describe('Use Perplexity AI for research-backed updates'),
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session }) => {
try {
log.info(`Updating tasks with args: ${JSON.stringify(args)}`);
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await updateTasksDirect(
{
projectRoot: rootFolder,
...args
},
log,
{ session }
);
if (result.success) {
log.info(
`Successfully updated tasks from ID ${args.from}: ${result.data.message}`
);
} else {
log.error(
`Failed to update tasks: ${result.error?.message || 'Unknown error'}`
);
}
return handleApiResult(result, log, 'Error updating tasks');
} catch (error) {
log.error(`Error in update tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File

@@ -3,68 +3,83 @@
* Utility functions for Task Master CLI integration
*/
import { spawnSync } from "child_process";
import path from "path";
import { spawnSync } from 'child_process';
import path from 'path';
import fs from 'fs';
import { contextManager } from '../core/context-manager.js'; // Import the singleton
// Import path utilities to ensure consistent path resolution
import { lastFoundProjectRoot, PROJECT_MARKERS } from '../core/utils/path-utils.js';
import {
lastFoundProjectRoot,
PROJECT_MARKERS
} from '../core/utils/path-utils.js';
/**
* Get normalized project root path
* Get normalized project root path
* @param {string|undefined} projectRootRaw - Raw project root from arguments
* @param {Object} log - Logger object
* @returns {string} - Normalized absolute path to project root
*/
function getProjectRoot(projectRootRaw, log) {
// PRECEDENCE ORDER:
// 1. Environment variable override
// 2. Explicitly provided projectRoot in args
// 3. Previously found/cached project root
// 4. Current directory if it has project markers
// 5. Current directory with warning
// 1. Check for environment variable override
if (process.env.TASK_MASTER_PROJECT_ROOT) {
const envRoot = process.env.TASK_MASTER_PROJECT_ROOT;
const absolutePath = path.isAbsolute(envRoot)
? envRoot
: path.resolve(process.cwd(), envRoot);
log.info(`Using project root from TASK_MASTER_PROJECT_ROOT environment variable: ${absolutePath}`);
return absolutePath;
}
// PRECEDENCE ORDER:
// 1. Environment variable override
// 2. Explicitly provided projectRoot in args
// 3. Previously found/cached project root
// 4. Current directory if it has project markers
// 5. Current directory with warning
// 2. If project root is explicitly provided, use it
if (projectRootRaw) {
const absolutePath = path.isAbsolute(projectRootRaw)
? projectRootRaw
: path.resolve(process.cwd(), projectRootRaw);
log.info(`Using explicitly provided project root: ${absolutePath}`);
return absolutePath;
}
// 3. If we have a last found project root from a tasks.json search, use that for consistency
if (lastFoundProjectRoot) {
log.info(`Using last known project root where tasks.json was found: ${lastFoundProjectRoot}`);
return lastFoundProjectRoot;
}
// 4. Check if the current directory has any indicators of being a task-master project
const currentDir = process.cwd();
if (PROJECT_MARKERS.some(marker => {
const markerPath = path.join(currentDir, marker);
return fs.existsSync(markerPath);
})) {
log.info(`Using current directory as project root (found project markers): ${currentDir}`);
return currentDir;
}
// 5. Default to current working directory but warn the user
log.warn(`No task-master project detected in current directory. Using ${currentDir} as project root.`);
log.warn('Consider using --project-root to specify the correct project location or set TASK_MASTER_PROJECT_ROOT environment variable.');
return currentDir;
// 1. Check for environment variable override
if (process.env.TASK_MASTER_PROJECT_ROOT) {
const envRoot = process.env.TASK_MASTER_PROJECT_ROOT;
const absolutePath = path.isAbsolute(envRoot)
? envRoot
: path.resolve(process.cwd(), envRoot);
log.info(
`Using project root from TASK_MASTER_PROJECT_ROOT environment variable: ${absolutePath}`
);
return absolutePath;
}
// 2. If project root is explicitly provided, use it
if (projectRootRaw) {
const absolutePath = path.isAbsolute(projectRootRaw)
? projectRootRaw
: path.resolve(process.cwd(), projectRootRaw);
log.info(`Using explicitly provided project root: ${absolutePath}`);
return absolutePath;
}
// 3. If we have a last found project root from a tasks.json search, use that for consistency
if (lastFoundProjectRoot) {
log.info(
`Using last known project root where tasks.json was found: ${lastFoundProjectRoot}`
);
return lastFoundProjectRoot;
}
// 4. Check if the current directory has any indicators of being a task-master project
const currentDir = process.cwd();
if (
PROJECT_MARKERS.some((marker) => {
const markerPath = path.join(currentDir, marker);
return fs.existsSync(markerPath);
})
) {
log.info(
`Using current directory as project root (found project markers): ${currentDir}`
);
return currentDir;
}
// 5. Default to current working directory but warn the user
log.warn(
`No task-master project detected in current directory. Using ${currentDir} as project root.`
);
log.warn(
'Consider using --project-root to specify the correct project location or set TASK_MASTER_PROJECT_ROOT environment variable.'
);
return currentDir;
}
/**
@@ -74,81 +89,87 @@ function getProjectRoot(projectRootRaw, log) {
* @returns {string|null} - The absolute path to the project root, or null if not found.
*/
function getProjectRootFromSession(session, log) {
try {
// Add detailed logging of session structure
log.info(`Session object: ${JSON.stringify({
hasSession: !!session,
hasRoots: !!session?.roots,
rootsType: typeof session?.roots,
isRootsArray: Array.isArray(session?.roots),
rootsLength: session?.roots?.length,
firstRoot: session?.roots?.[0],
hasRootsRoots: !!session?.roots?.roots,
rootsRootsType: typeof session?.roots?.roots,
isRootsRootsArray: Array.isArray(session?.roots?.roots),
rootsRootsLength: session?.roots?.roots?.length,
firstRootsRoot: session?.roots?.roots?.[0]
})}`);
// ALWAYS ensure we return a valid path for project root
const cwd = process.cwd();
// If we have a session with roots array
if (session?.roots?.[0]?.uri) {
const rootUri = session.roots[0].uri;
log.info(`Found rootUri in session.roots[0].uri: ${rootUri}`);
const rootPath = rootUri.startsWith('file://')
? decodeURIComponent(rootUri.slice(7))
: rootUri;
log.info(`Decoded rootPath: ${rootPath}`);
return rootPath;
}
// If we have a session with roots.roots array (different structure)
if (session?.roots?.roots?.[0]?.uri) {
const rootUri = session.roots.roots[0].uri;
log.info(`Found rootUri in session.roots.roots[0].uri: ${rootUri}`);
const rootPath = rootUri.startsWith('file://')
? decodeURIComponent(rootUri.slice(7))
: rootUri;
log.info(`Decoded rootPath: ${rootPath}`);
return rootPath;
}
try {
// Add detailed logging of session structure
log.info(
`Session object: ${JSON.stringify({
hasSession: !!session,
hasRoots: !!session?.roots,
rootsType: typeof session?.roots,
isRootsArray: Array.isArray(session?.roots),
rootsLength: session?.roots?.length,
firstRoot: session?.roots?.[0],
hasRootsRoots: !!session?.roots?.roots,
rootsRootsType: typeof session?.roots?.roots,
isRootsRootsArray: Array.isArray(session?.roots?.roots),
rootsRootsLength: session?.roots?.roots?.length,
firstRootsRoot: session?.roots?.roots?.[0]
})}`
);
// Get the server's location and try to find project root -- this is a fallback necessary in Cursor IDE
const serverPath = process.argv[1]; // This should be the path to server.js, which is in mcp-server/
if (serverPath && serverPath.includes('mcp-server')) {
// Find the mcp-server directory first
const mcpServerIndex = serverPath.indexOf('mcp-server');
if (mcpServerIndex !== -1) {
// Get the path up to mcp-server, which should be the project root
const projectRoot = serverPath.substring(0, mcpServerIndex - 1); // -1 to remove trailing slash
// Verify this looks like our project root by checking for key files/directories
if (fs.existsSync(path.join(projectRoot, '.cursor')) ||
fs.existsSync(path.join(projectRoot, 'mcp-server')) ||
fs.existsSync(path.join(projectRoot, 'package.json'))) {
log.info(`Found project root from server path: ${projectRoot}`);
return projectRoot;
}
}
}
// ALWAYS ensure we return a valid path for project root
const cwd = process.cwd();
// ALWAYS ensure we return a valid path as a last resort
log.info(`Using current working directory as ultimate fallback: ${cwd}`);
return cwd;
} catch (e) {
// If we have a server path, use it as a basis for project root
const serverPath = process.argv[1];
if (serverPath && serverPath.includes('mcp-server')) {
const mcpServerIndex = serverPath.indexOf('mcp-server');
return mcpServerIndex !== -1 ? serverPath.substring(0, mcpServerIndex - 1) : process.cwd();
}
// Only use cwd if it's not "/"
const cwd = process.cwd();
return cwd !== '/' ? cwd : '/';
}
// If we have a session with roots array
if (session?.roots?.[0]?.uri) {
const rootUri = session.roots[0].uri;
log.info(`Found rootUri in session.roots[0].uri: ${rootUri}`);
const rootPath = rootUri.startsWith('file://')
? decodeURIComponent(rootUri.slice(7))
: rootUri;
log.info(`Decoded rootPath: ${rootPath}`);
return rootPath;
}
// If we have a session with roots.roots array (different structure)
if (session?.roots?.roots?.[0]?.uri) {
const rootUri = session.roots.roots[0].uri;
log.info(`Found rootUri in session.roots.roots[0].uri: ${rootUri}`);
const rootPath = rootUri.startsWith('file://')
? decodeURIComponent(rootUri.slice(7))
: rootUri;
log.info(`Decoded rootPath: ${rootPath}`);
return rootPath;
}
// Get the server's location and try to find project root -- this is a fallback necessary in Cursor IDE
const serverPath = process.argv[1]; // This should be the path to server.js, which is in mcp-server/
if (serverPath && serverPath.includes('mcp-server')) {
// Find the mcp-server directory first
const mcpServerIndex = serverPath.indexOf('mcp-server');
if (mcpServerIndex !== -1) {
// Get the path up to mcp-server, which should be the project root
const projectRoot = serverPath.substring(0, mcpServerIndex - 1); // -1 to remove trailing slash
// Verify this looks like our project root by checking for key files/directories
if (
fs.existsSync(path.join(projectRoot, '.cursor')) ||
fs.existsSync(path.join(projectRoot, 'mcp-server')) ||
fs.existsSync(path.join(projectRoot, 'package.json'))
) {
log.info(`Found project root from server path: ${projectRoot}`);
return projectRoot;
}
}
}
// ALWAYS ensure we return a valid path as a last resort
log.info(`Using current working directory as ultimate fallback: ${cwd}`);
return cwd;
} catch (e) {
// If we have a server path, use it as a basis for project root
const serverPath = process.argv[1];
if (serverPath && serverPath.includes('mcp-server')) {
const mcpServerIndex = serverPath.indexOf('mcp-server');
return mcpServerIndex !== -1
? serverPath.substring(0, mcpServerIndex - 1)
: process.cwd();
}
// Only use cwd if it's not "/"
const cwd = process.cwd();
return cwd !== '/' ? cwd : '/';
}
}
/**
@@ -159,28 +180,35 @@ function getProjectRootFromSession(session, log) {
* @param {Function} processFunction - Optional function to process successful result data
* @returns {Object} - Standardized MCP response object
*/
function handleApiResult(result, log, errorPrefix = 'API error', processFunction = processMCPResponseData) {
if (!result.success) {
const errorMsg = result.error?.message || `Unknown ${errorPrefix}`;
// Include cache status in error logs
log.error(`${errorPrefix}: ${errorMsg}. From cache: ${result.fromCache}`); // Keep logging cache status on error
return createErrorResponse(errorMsg);
}
// Process the result data if needed
const processedData = processFunction ? processFunction(result.data) : result.data;
// Log success including cache status
log.info(`Successfully completed operation. From cache: ${result.fromCache}`); // Add success log with cache status
function handleApiResult(
result,
log,
errorPrefix = 'API error',
processFunction = processMCPResponseData
) {
if (!result.success) {
const errorMsg = result.error?.message || `Unknown ${errorPrefix}`;
// Include cache status in error logs
log.error(`${errorPrefix}: ${errorMsg}. From cache: ${result.fromCache}`); // Keep logging cache status on error
return createErrorResponse(errorMsg);
}
// Create the response payload including the fromCache flag
const responsePayload = {
fromCache: result.fromCache, // Get the flag from the original 'result'
data: processedData // Nest the processed data under a 'data' key
};
// Pass this combined payload to createContentResponse
return createContentResponse(responsePayload);
// Process the result data if needed
const processedData = processFunction
? processFunction(result.data)
: result.data;
// Log success including cache status
log.info(`Successfully completed operation. From cache: ${result.fromCache}`); // Add success log with cache status
// Create the response payload including the fromCache flag
const responsePayload = {
fromCache: result.fromCache, // Get the flag from the original 'result'
data: processedData // Nest the processed data under a 'data' key
};
// Pass this combined payload to createContentResponse
return createContentResponse(responsePayload);
}
/**
@@ -193,75 +221,75 @@ function handleApiResult(result, log, errorPrefix = 'API error', processFunction
* @returns {Object} - The result of the command execution
*/
function executeTaskMasterCommand(
command,
log,
args = [],
projectRootRaw = null,
customEnv = null // Changed from session to customEnv
command,
log,
args = [],
projectRootRaw = null,
customEnv = null // Changed from session to customEnv
) {
try {
// Normalize project root internally using the getProjectRoot utility
const cwd = getProjectRoot(projectRootRaw, log);
try {
// Normalize project root internally using the getProjectRoot utility
const cwd = getProjectRoot(projectRootRaw, log);
log.info(
`Executing task-master ${command} with args: ${JSON.stringify(
args
)} in directory: ${cwd}`
);
log.info(
`Executing task-master ${command} with args: ${JSON.stringify(
args
)} in directory: ${cwd}`
);
// Prepare full arguments array
const fullArgs = [command, ...args];
// Prepare full arguments array
const fullArgs = [command, ...args];
// Common options for spawn
const spawnOptions = {
encoding: "utf8",
cwd: cwd,
// Merge process.env with customEnv, giving precedence to customEnv
env: { ...process.env, ...(customEnv || {}) }
};
// Common options for spawn
const spawnOptions = {
encoding: 'utf8',
cwd: cwd,
// Merge process.env with customEnv, giving precedence to customEnv
env: { ...process.env, ...(customEnv || {}) }
};
// Log the environment being passed (optional, for debugging)
// log.info(`Spawn options env: ${JSON.stringify(spawnOptions.env)}`);
// Log the environment being passed (optional, for debugging)
// log.info(`Spawn options env: ${JSON.stringify(spawnOptions.env)}`);
// Execute the command using the global task-master CLI or local script
// Try the global CLI first
let result = spawnSync("task-master", fullArgs, spawnOptions);
// Execute the command using the global task-master CLI or local script
// Try the global CLI first
let result = spawnSync('task-master', fullArgs, spawnOptions);
// If global CLI is not available, try fallback to the local script
if (result.error && result.error.code === "ENOENT") {
log.info("Global task-master not found, falling back to local script");
// Pass the same spawnOptions (including env) to the fallback
result = spawnSync("node", ["scripts/dev.js", ...fullArgs], spawnOptions);
}
// If global CLI is not available, try fallback to the local script
if (result.error && result.error.code === 'ENOENT') {
log.info('Global task-master not found, falling back to local script');
// Pass the same spawnOptions (including env) to the fallback
result = spawnSync('node', ['scripts/dev.js', ...fullArgs], spawnOptions);
}
if (result.error) {
throw new Error(`Command execution error: ${result.error.message}`);
}
if (result.error) {
throw new Error(`Command execution error: ${result.error.message}`);
}
if (result.status !== 0) {
// Improve error handling by combining stderr and stdout if stderr is empty
const errorOutput = result.stderr
? result.stderr.trim()
: result.stdout
? result.stdout.trim()
: "Unknown error";
throw new Error(
`Command failed with exit code ${result.status}: ${errorOutput}`
);
}
if (result.status !== 0) {
// Improve error handling by combining stderr and stdout if stderr is empty
const errorOutput = result.stderr
? result.stderr.trim()
: result.stdout
? result.stdout.trim()
: 'Unknown error';
throw new Error(
`Command failed with exit code ${result.status}: ${errorOutput}`
);
}
return {
success: true,
stdout: result.stdout,
stderr: result.stderr,
};
} catch (error) {
log.error(`Error executing task-master command: ${error.message}`);
return {
success: false,
error: error.message,
};
}
return {
success: true,
stdout: result.stdout,
stderr: result.stderr
};
} catch (error) {
log.error(`Error executing task-master command: ${error.message}`);
return {
success: false,
error: error.message
};
}
}
/**
@@ -277,40 +305,44 @@ function executeTaskMasterCommand(
* Format: { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
*/
async function getCachedOrExecute({ cacheKey, actionFn, log }) {
// Check cache first
const cachedResult = contextManager.getCachedData(cacheKey);
if (cachedResult !== undefined) {
log.info(`Cache hit for key: ${cacheKey}`);
// Return the cached data in the same structure as a fresh result
return {
...cachedResult, // Spread the cached result to maintain its structure
fromCache: true // Just add the fromCache flag
};
}
// Check cache first
const cachedResult = contextManager.getCachedData(cacheKey);
log.info(`Cache miss for key: ${cacheKey}. Executing action function.`);
// Execute the action function if cache missed
const result = await actionFn();
// If the action was successful, cache the result (but without fromCache flag)
if (result.success && result.data !== undefined) {
log.info(`Action successful. Caching result for key: ${cacheKey}`);
// Cache the entire result structure (minus the fromCache flag)
const { fromCache, ...resultToCache } = result;
contextManager.setCachedData(cacheKey, resultToCache);
} else if (!result.success) {
log.warn(`Action failed for cache key ${cacheKey}. Result not cached. Error: ${result.error?.message}`);
} else {
log.warn(`Action for cache key ${cacheKey} succeeded but returned no data. Result not cached.`);
}
// Return the fresh result, indicating it wasn't from cache
return {
...result,
fromCache: false
};
if (cachedResult !== undefined) {
log.info(`Cache hit for key: ${cacheKey}`);
// Return the cached data in the same structure as a fresh result
return {
...cachedResult, // Spread the cached result to maintain its structure
fromCache: true // Just add the fromCache flag
};
}
log.info(`Cache miss for key: ${cacheKey}. Executing action function.`);
// Execute the action function if cache missed
const result = await actionFn();
// If the action was successful, cache the result (but without fromCache flag)
if (result.success && result.data !== undefined) {
log.info(`Action successful. Caching result for key: ${cacheKey}`);
// Cache the entire result structure (minus the fromCache flag)
const { fromCache, ...resultToCache } = result;
contextManager.setCachedData(cacheKey, resultToCache);
} else if (!result.success) {
log.warn(
`Action failed for cache key ${cacheKey}. Result not cached. Error: ${result.error?.message}`
);
} else {
log.warn(
`Action for cache key ${cacheKey} succeeded but returned no data. Result not cached.`
);
}
// Return the fresh result, indicating it wasn't from cache
return {
...result,
fromCache: false
};
}
/**
@@ -320,56 +352,68 @@ async function getCachedOrExecute({ cacheKey, actionFn, log }) {
* @param {string[]} fieldsToRemove - An array of field names to remove.
* @returns {Object|Array} - The processed data with specified fields removed.
*/
function processMCPResponseData(taskOrData, fieldsToRemove = ['details', 'testStrategy']) {
if (!taskOrData) {
return taskOrData;
}
function processMCPResponseData(
taskOrData,
fieldsToRemove = ['details', 'testStrategy']
) {
if (!taskOrData) {
return taskOrData;
}
// Helper function to process a single task object
const processSingleTask = (task) => {
if (typeof task !== 'object' || task === null) {
return task;
}
const processedTask = { ...task };
// Remove specified fields from the task
fieldsToRemove.forEach(field => {
delete processedTask[field];
});
// Helper function to process a single task object
const processSingleTask = (task) => {
if (typeof task !== 'object' || task === null) {
return task;
}
// Recursively process subtasks if they exist and are an array
if (processedTask.subtasks && Array.isArray(processedTask.subtasks)) {
// Use processArrayOfTasks to handle the subtasks array
processedTask.subtasks = processArrayOfTasks(processedTask.subtasks);
}
return processedTask;
};
// Helper function to process an array of tasks
const processArrayOfTasks = (tasks) => {
return tasks.map(processSingleTask);
};
const processedTask = { ...task };
// Check if the input is a data structure containing a 'tasks' array (like from listTasks)
if (typeof taskOrData === 'object' && taskOrData !== null && Array.isArray(taskOrData.tasks)) {
return {
...taskOrData, // Keep other potential fields like 'stats', 'filter'
tasks: processArrayOfTasks(taskOrData.tasks),
};
}
// Check if the input is likely a single task object (add more checks if needed)
else if (typeof taskOrData === 'object' && taskOrData !== null && 'id' in taskOrData && 'title' in taskOrData) {
return processSingleTask(taskOrData);
}
// Check if the input is an array of tasks directly (less common but possible)
else if (Array.isArray(taskOrData)) {
return processArrayOfTasks(taskOrData);
}
// If it doesn't match known task structures, return it as is
return taskOrData;
// Remove specified fields from the task
fieldsToRemove.forEach((field) => {
delete processedTask[field];
});
// Recursively process subtasks if they exist and are an array
if (processedTask.subtasks && Array.isArray(processedTask.subtasks)) {
// Use processArrayOfTasks to handle the subtasks array
processedTask.subtasks = processArrayOfTasks(processedTask.subtasks);
}
return processedTask;
};
// Helper function to process an array of tasks
const processArrayOfTasks = (tasks) => {
return tasks.map(processSingleTask);
};
// Check if the input is a data structure containing a 'tasks' array (like from listTasks)
if (
typeof taskOrData === 'object' &&
taskOrData !== null &&
Array.isArray(taskOrData.tasks)
) {
return {
...taskOrData, // Keep other potential fields like 'stats', 'filter'
tasks: processArrayOfTasks(taskOrData.tasks)
};
}
// Check if the input is likely a single task object (add more checks if needed)
else if (
typeof taskOrData === 'object' &&
taskOrData !== null &&
'id' in taskOrData &&
'title' in taskOrData
) {
return processSingleTask(taskOrData);
}
// Check if the input is an array of tasks directly (less common but possible)
else if (Array.isArray(taskOrData)) {
return processArrayOfTasks(taskOrData);
}
// If it doesn't match known task structures, return it as is
return taskOrData;
}
/**
@@ -378,19 +422,20 @@ function processMCPResponseData(taskOrData, fieldsToRemove = ['details', 'testSt
* @returns {Object} - Content response object in FastMCP format
*/
function createContentResponse(content) {
// FastMCP requires text type, so we format objects as JSON strings
return {
content: [
{
type: "text",
text: typeof content === 'object' ?
// Format JSON nicely with indentation
JSON.stringify(content, null, 2) :
// Keep other content types as-is
String(content)
}
]
};
// FastMCP requires text type, so we format objects as JSON strings
return {
content: [
{
type: 'text',
text:
typeof content === 'object'
? // Format JSON nicely with indentation
JSON.stringify(content, null, 2)
: // Keep other content types as-is
String(content)
}
]
};
}
/**
@@ -399,24 +444,24 @@ function createContentResponse(content) {
* @returns {Object} - Error content response object in FastMCP format
*/
export function createErrorResponse(errorMessage) {
return {
content: [
{
type: "text",
text: `Error: ${errorMessage}`
}
],
isError: true
};
return {
content: [
{
type: 'text',
text: `Error: ${errorMessage}`
}
],
isError: true
};
}
// Ensure all functions are exported
export {
getProjectRoot,
getProjectRootFromSession,
handleApiResult,
executeTaskMasterCommand,
getCachedOrExecute,
processMCPResponseData,
createContentResponse,
getProjectRoot,
getProjectRootFromSession,
handleApiResult,
executeTaskMasterCommand,
getCachedOrExecute,
processMCPResponseData,
createContentResponse
};

View File

@@ -3,56 +3,68 @@
* Tool for validating task dependencies
*/
import { z } from "zod";
import { z } from 'zod';
import {
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from "./utils.js";
import { validateDependenciesDirect } from "../core/task-master-core.js";
handleApiResult,
createErrorResponse,
getProjectRootFromSession
} from './utils.js';
import { validateDependenciesDirect } from '../core/task-master-core.js';
/**
* Register the validateDependencies tool with the MCP server
* @param {Object} server - FastMCP server instance
*/
export function registerValidateDependenciesTool(server) {
server.addTool({
name: "validate_dependencies",
description: "Check tasks for dependency issues (like circular references or links to non-existent tasks) without making changes.",
parameters: z.object({
file: z.string().optional().describe("Path to the tasks file"),
projectRoot: z.string().optional().describe("Root directory of the project (default: current working directory)")
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Validating dependencies with args: ${JSON.stringify(args)}`);
await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await validateDependenciesDirect({
projectRoot: rootFolder,
...args
}, log, { reportProgress, mcpLog: log, session});
await reportProgress({ progress: 100 });
if (result.success) {
log.info(`Successfully validated dependencies: ${result.data.message}`);
} else {
log.error(`Failed to validate dependencies: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error validating dependencies');
} catch (error) {
log.error(`Error in validateDependencies tool: ${error.message}`);
return createErrorResponse(error.message);
}
},
});
}
server.addTool({
name: 'validate_dependencies',
description:
'Check tasks for dependency issues (like circular references or links to non-existent tasks) without making changes.',
parameters: z.object({
file: z.string().optional().describe('Path to the tasks file'),
projectRoot: z
.string()
.optional()
.describe(
'Root directory of the project (default: current working directory)'
)
}),
execute: async (args, { log, session, reportProgress }) => {
try {
log.info(`Validating dependencies with args: ${JSON.stringify(args)}`);
await reportProgress({ progress: 0 });
let rootFolder = getProjectRootFromSession(session, log);
if (!rootFolder && args.projectRoot) {
rootFolder = args.projectRoot;
log.info(`Using project root from args as fallback: ${rootFolder}`);
}
const result = await validateDependenciesDirect(
{
projectRoot: rootFolder,
...args
},
log,
{ reportProgress, mcpLog: log, session }
);
await reportProgress({ progress: 100 });
if (result.success) {
log.info(
`Successfully validated dependencies: ${result.data.message}`
);
} else {
log.error(`Failed to validate dependencies: ${result.error.message}`);
}
return handleApiResult(result, log, 'Error validating dependencies');
} catch (error) {
log.error(`Error in validateDependencies tool: ${error.message}`);
return createErrorResponse(error.message);
}
}
});
}

View File

@@ -8,64 +8,68 @@ import fs from 'fs';
console.error(`Current working directory: ${process.cwd()}`);
try {
console.error('Attempting to load FastMCP Config...');
// Check if .cursor/mcp.json exists
const mcpPath = path.join(process.cwd(), '.cursor', 'mcp.json');
console.error(`Checking if mcp.json exists at: ${mcpPath}`);
if (fs.existsSync(mcpPath)) {
console.error('mcp.json file found');
console.error(`File content: ${JSON.stringify(JSON.parse(fs.readFileSync(mcpPath, 'utf8')), null, 2)}`);
} else {
console.error('mcp.json file not found');
}
// Try to create Config
const config = new Config();
console.error('Config created successfully');
// Check if env property exists
if (config.env) {
console.error(`Config.env exists with keys: ${Object.keys(config.env).join(', ')}`);
// Print each env var value (careful with sensitive values)
for (const [key, value] of Object.entries(config.env)) {
if (key.includes('KEY')) {
console.error(`${key}: [value hidden]`);
} else {
console.error(`${key}: ${value}`);
}
}
} else {
console.error('Config.env does not exist');
}
console.error('Attempting to load FastMCP Config...');
// Check if .cursor/mcp.json exists
const mcpPath = path.join(process.cwd(), '.cursor', 'mcp.json');
console.error(`Checking if mcp.json exists at: ${mcpPath}`);
if (fs.existsSync(mcpPath)) {
console.error('mcp.json file found');
console.error(
`File content: ${JSON.stringify(JSON.parse(fs.readFileSync(mcpPath, 'utf8')), null, 2)}`
);
} else {
console.error('mcp.json file not found');
}
// Try to create Config
const config = new Config();
console.error('Config created successfully');
// Check if env property exists
if (config.env) {
console.error(
`Config.env exists with keys: ${Object.keys(config.env).join(', ')}`
);
// Print each env var value (careful with sensitive values)
for (const [key, value] of Object.entries(config.env)) {
if (key.includes('KEY')) {
console.error(`${key}: [value hidden]`);
} else {
console.error(`${key}: ${value}`);
}
}
} else {
console.error('Config.env does not exist');
}
} catch (error) {
console.error(`Error loading Config: ${error.message}`);
console.error(`Stack trace: ${error.stack}`);
console.error(`Error loading Config: ${error.message}`);
console.error(`Stack trace: ${error.stack}`);
}
// Log process.env to see if values from mcp.json were loaded automatically
console.error('\nChecking if process.env already has values from mcp.json:');
const envVars = [
'ANTHROPIC_API_KEY',
'PERPLEXITY_API_KEY',
'MODEL',
'PERPLEXITY_MODEL',
'MAX_TOKENS',
'TEMPERATURE',
'DEFAULT_SUBTASKS',
'DEFAULT_PRIORITY'
'ANTHROPIC_API_KEY',
'PERPLEXITY_API_KEY',
'MODEL',
'PERPLEXITY_MODEL',
'MAX_TOKENS',
'TEMPERATURE',
'DEFAULT_SUBTASKS',
'DEFAULT_PRIORITY'
];
for (const varName of envVars) {
if (process.env[varName]) {
if (varName.includes('KEY')) {
console.error(`${varName}: [value hidden]`);
} else {
console.error(`${varName}: ${process.env[varName]}`);
}
} else {
console.error(`${varName}: not set`);
}
}
if (process.env[varName]) {
if (varName.includes('KEY')) {
console.error(`${varName}: [value hidden]`);
} else {
console.error(`${varName}: ${process.env[varName]}`);
}
} else {
console.error(`${varName}: not set`);
}
}

View File

@@ -1,6 +1,6 @@
{
"key": "value",
"nested": {
"prop": true
}
}
"key": "value",
"nested": {
"prop": true
}
}

package-lock.json (generated): 16083 changed lines

File diff suppressed because it is too large

View File

@@ -1,96 +1,99 @@
{
"name": "task-master-ai",
"version": "0.10.1",
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
"main": "index.js",
"type": "module",
"bin": {
"task-master": "bin/task-master.js",
"task-master-init": "bin/task-master-init.js",
"task-master-mcp": "mcp-server/server.js",
"task-master-mcp-server": "mcp-server/server.js"
},
"scripts": {
"test": "node --experimental-vm-modules node_modules/.bin/jest",
"test:fails": "node --experimental-vm-modules node_modules/.bin/jest --onlyFailures",
"test:watch": "node --experimental-vm-modules node_modules/.bin/jest --watch",
"test:coverage": "node --experimental-vm-modules node_modules/.bin/jest --coverage",
"prepare-package": "node scripts/prepare-package.js",
"prepublishOnly": "npm run prepare-package",
"prepare": "chmod +x bin/task-master.js bin/task-master-init.js mcp-server/server.js",
"changeset": "changeset",
"release": "changeset publish",
"inspector": "CLIENT_PORT=8888 SERVER_PORT=9000 npx @modelcontextprotocol/inspector node mcp-server/server.js",
"mcp-server": "node mcp-server/server.js"
},
"keywords": [
"claude",
"task",
"management",
"ai",
"development",
"cursor",
"anthropic",
"llm",
"mcp",
"context"
],
"author": "Eyal Toledano",
"license": "MIT WITH Commons-Clause",
"dependencies": {
"@anthropic-ai/sdk": "^0.39.0",
"boxen": "^8.0.1",
"chalk": "^4.1.2",
"cli-table3": "^0.6.5",
"commander": "^11.1.0",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
"express": "^4.21.2",
"fastmcp": "^1.20.5",
"figlet": "^1.8.0",
"fuse.js": "^7.0.0",
"gradient-string": "^3.0.0",
"helmet": "^8.1.0",
"inquirer": "^12.5.0",
"jsonwebtoken": "^9.0.2",
"lru-cache": "^10.2.0",
"openai": "^4.89.0",
"ora": "^8.2.0",
"uuid": "^11.1.0"
},
"engines": {
"node": ">=14.0.0"
},
"repository": {
"type": "git",
"url": "git+https://github.com/eyaltoledano/claude-task-master.git"
},
"homepage": "https://github.com/eyaltoledano/claude-task-master#readme",
"bugs": {
"url": "https://github.com/eyaltoledano/claude-task-master/issues"
},
"files": [
"scripts/init.js",
"scripts/dev.js",
"scripts/modules/**",
"assets/**",
".cursor/**",
"README-task-master.md",
"index.js",
"bin/**",
"mcp-server/**"
],
"overrides": {
"node-fetch": "^3.3.2",
"whatwg-url": "^11.0.0"
},
"devDependencies": {
"@changesets/changelog-github": "^0.5.1",
"@changesets/cli": "^2.28.1",
"@types/jest": "^29.5.14",
"jest": "^29.7.0",
"jest-environment-node": "^29.7.0",
"mock-fs": "^5.5.0",
"supertest": "^7.1.0"
}
"name": "task-master-ai",
"version": "0.10.1",
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
"main": "index.js",
"type": "module",
"bin": {
"task-master": "bin/task-master.js",
"task-master-init": "bin/task-master-init.js",
"task-master-mcp": "mcp-server/server.js",
"task-master-mcp-server": "mcp-server/server.js"
},
"scripts": {
"test": "node --experimental-vm-modules node_modules/.bin/jest",
"test:fails": "node --experimental-vm-modules node_modules/.bin/jest --onlyFailures",
"test:watch": "node --experimental-vm-modules node_modules/.bin/jest --watch",
"test:coverage": "node --experimental-vm-modules node_modules/.bin/jest --coverage",
"prepare-package": "node scripts/prepare-package.js",
"prepublishOnly": "npm run prepare-package",
"prepare": "chmod +x bin/task-master.js bin/task-master-init.js mcp-server/server.js",
"changeset": "changeset",
"release": "changeset publish",
"inspector": "CLIENT_PORT=8888 SERVER_PORT=9000 npx @modelcontextprotocol/inspector node mcp-server/server.js",
"mcp-server": "node mcp-server/server.js",
"format-check": "prettier --check .",
"format": "prettier --write ."
},
"keywords": [
"claude",
"task",
"management",
"ai",
"development",
"cursor",
"anthropic",
"llm",
"mcp",
"context"
],
"author": "Eyal Toledano",
"license": "MIT WITH Commons-Clause",
"dependencies": {
"@anthropic-ai/sdk": "^0.39.0",
"boxen": "^8.0.1",
"chalk": "^4.1.2",
"cli-table3": "^0.6.5",
"commander": "^11.1.0",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
"express": "^4.21.2",
"fastmcp": "^1.20.5",
"figlet": "^1.8.0",
"fuse.js": "^7.0.0",
"gradient-string": "^3.0.0",
"helmet": "^8.1.0",
"inquirer": "^12.5.0",
"jsonwebtoken": "^9.0.2",
"lru-cache": "^10.2.0",
"openai": "^4.89.0",
"ora": "^8.2.0",
"uuid": "^11.1.0"
},
"engines": {
"node": ">=14.0.0"
},
"repository": {
"type": "git",
"url": "git+https://github.com/eyaltoledano/claude-task-master.git"
},
"homepage": "https://github.com/eyaltoledano/claude-task-master#readme",
"bugs": {
"url": "https://github.com/eyaltoledano/claude-task-master/issues"
},
"files": [
"scripts/init.js",
"scripts/dev.js",
"scripts/modules/**",
"assets/**",
".cursor/**",
"README-task-master.md",
"index.js",
"bin/**",
"mcp-server/**"
],
"overrides": {
"node-fetch": "^3.3.2",
"whatwg-url": "^11.0.0"
},
"devDependencies": {
"@changesets/changelog-github": "^0.5.1",
"@changesets/cli": "^2.28.1",
"@types/jest": "^29.5.14",
"jest": "^29.7.0",
"jest-environment-node": "^29.7.0",
"mock-fs": "^5.5.0",
"prettier": "^3.5.3",
"supertest": "^7.1.0"
}
}

View File

@@ -21,9 +21,11 @@ In an AI-driven development process—particularly with tools like [Cursor](http
The script can be configured through environment variables in a `.env` file at the root of the project:
### Required Configuration
- `ANTHROPIC_API_KEY`: Your Anthropic API key for Claude
### Optional Configuration
- `MODEL`: Specify which Claude model to use (default: "claude-3-7-sonnet-20250219")
- `MAX_TOKENS`: Maximum tokens for model responses (default: 4000)
- `TEMPERATURE`: Temperature for model responses (default: 0.7)
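
As a quick reference, a minimal `.env` sketch combining the variables above might look like the following (the API key value is a placeholder; the other values simply restate the documented defaults):

```
ANTHROPIC_API_KEY=your-anthropic-api-key
MODEL=claude-3-7-sonnet-20250219
MAX_TOKENS=4000
TEMPERATURE=0.7
```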
@@ -38,9 +40,10 @@ The script can be configured through environment variables in a `.env` file at t
## How It Works
1. **`tasks.json`**:
- A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
- The `meta` field can store additional info like the project's name, version, or reference to the PRD.
1. **`tasks.json`**:
- A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
- The `meta` field can store additional info like the project's name, version, or reference to the PRD.
- Tasks can have `subtasks` for more detailed implementation steps.
- Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.
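
For illustration only, a stripped-down `tasks.json` following the structure described above might look like this (all values are hypothetical):

```json
{
	"meta": { "projectName": "Your Project Name", "version": "1.0.0" },
	"tasks": [
		{
			"id": 1,
			"title": "Set up project scaffolding",
			"description": "Initialize the repository and tooling",
			"status": "done",
			"dependencies": [],
			"subtasks": []
		},
		{
			"id": 2,
			"title": "Implement authentication",
			"description": "Add a login API route",
			"status": "pending",
			"dependencies": [1],
			"subtasks": []
		}
	]
}
```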
@@ -102,6 +105,7 @@ node scripts/dev.js update --file=custom-tasks.json --from=5 --prompt="Change da
```
Notes:
- The `--prompt` parameter is required and should explain the changes or new context
- Only tasks that aren't marked as 'done' will be updated
- Tasks with ID >= the specified --from value will be updated
@@ -120,6 +124,7 @@ node scripts/dev.js update-task --id=4 --prompt="Use JWT for authentication" --r
```
This command:
- Updates only the specified task rather than a range of tasks
- Provides detailed validation with helpful error messages
- Checks for required API keys when using research mode
@@ -146,6 +151,7 @@ node scripts/dev.js set-status --id=1,2,3 --status=done
```
Notes:
- When marking a parent task as "done", all of its subtasks will automatically be marked as "done" as well
- Common status values are 'done', 'pending', and 'deferred', but any string is accepted
- You can specify multiple task IDs by separating them with commas
@@ -195,6 +201,7 @@ node scripts/dev.js clear-subtasks --all
```
Notes:
- After clearing subtasks, task files are automatically regenerated
- This is useful when you want to regenerate subtasks with a different approach
- Can be combined with the `expand` command to immediately generate new subtasks
@@ -210,6 +217,7 @@ The script integrates with two AI services:
The Perplexity integration uses the OpenAI client to connect to Perplexity's API, which provides enhanced research capabilities for generating more informed subtasks. If the Perplexity API is unavailable or encounters an error, the script will automatically fall back to using Anthropic's Claude.
To use the Perplexity integration:
1. Obtain a Perplexity API key
2. Add `PERPLEXITY_API_KEY` to your `.env` file
3. Optionally specify `PERPLEXITY_MODEL` in your `.env` file (default: "sonar-medium-online")
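
A minimal sketch of the corresponding `.env` additions (the key value is a placeholder):

```
PERPLEXITY_API_KEY=your-perplexity-api-key
PERPLEXITY_MODEL=sonar-medium-online
```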
@@ -218,6 +226,7 @@ To use the Perplexity integration:
## Logging
The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:
- `debug`: Detailed information, typically useful for troubleshooting
- `info`: Confirmation that things are working as expected (default)
- `warn`: Warning messages that don't prevent execution
@@ -240,17 +249,20 @@ node scripts/dev.js remove-dependency --id=<id> --depends-on=<id>
These commands:
1. **Allow precise dependency management**:
- Add dependencies between tasks with automatic validation
- Remove dependencies when they're no longer needed
- Update task files automatically after changes
2. **Include validation checks**:
- Prevent circular dependencies (a task depending on itself)
- Prevent duplicate dependencies
- Verify that both tasks exist before adding/removing dependencies
- Check if dependencies exist before attempting to remove them
3. **Provide clear feedback**:
- Success messages confirm when dependencies are added/removed
- Error messages explain why operations failed (if applicable)
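
For example, assuming an `add-dependency` command that mirrors the `remove-dependency` flags shown above (the exact command name is an assumption, not confirmed by this diff), managing a dependency might look like:

```
# Make task 5 depend on task 3 (assumed command name)
node scripts/dev.js add-dependency --id=5 --depends-on=3

# Remove the same dependency later
node scripts/dev.js remove-dependency --id=5 --depends-on=3
```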
@@ -275,6 +287,7 @@ node scripts/dev.js validate-dependencies --file=custom-tasks.json
```
This command:
- Scans all tasks and subtasks for non-existent dependencies
- Identifies potential self-dependencies (tasks referencing themselves)
- Reports all found issues without modifying files
@@ -296,6 +309,7 @@ node scripts/dev.js fix-dependencies --file=custom-tasks.json
```
This command:
1. **Validates all dependencies** across tasks and subtasks
2. **Automatically removes**:
- References to non-existent tasks and subtasks
@@ -333,6 +347,7 @@ node scripts/dev.js analyze-complexity --research
```
Notes:
- The command uses Claude to analyze each task's complexity (or Perplexity with --research flag)
- Tasks are scored on a scale of 1-10
- Each task receives a recommended number of subtasks based on DEFAULT_SUBTASKS configuration
@@ -357,33 +372,35 @@ node scripts/dev.js expand --id=8 --num=5 --prompt="Custom prompt"
```
When a complexity report exists:
- The `expand` command will use the recommended subtask count from the report (unless overridden)
- It will use the tailored expansion prompt from the report (unless a custom prompt is provided)
- When using `--all`, tasks are sorted by complexity score (highest first)
- The `--research` flag is preserved from the complexity analysis to expansion
The output report structure is:
```json
{
"meta": {
"generatedAt": "2023-06-15T12:34:56.789Z",
"tasksAnalyzed": 20,
"thresholdScore": 5,
"projectName": "Your Project Name",
"usedResearch": true
},
"complexityAnalysis": [
{
"taskId": 8,
"taskTitle": "Develop Implementation Drift Handling",
"complexityScore": 9.5,
"recommendedSubtasks": 6,
"expansionPrompt": "Create subtasks that handle detecting...",
"reasoning": "This task requires sophisticated logic...",
"expansionCommand": "node scripts/dev.js expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
},
// More tasks sorted by complexity score (highest first)
]
"meta": {
"generatedAt": "2023-06-15T12:34:56.789Z",
"tasksAnalyzed": 20,
"thresholdScore": 5,
"projectName": "Your Project Name",
"usedResearch": true
},
"complexityAnalysis": [
{
"taskId": 8,
"taskTitle": "Develop Implementation Drift Handling",
"complexityScore": 9.5,
"recommendedSubtasks": 6,
"expansionPrompt": "Create subtasks that handle detecting...",
"reasoning": "This task requires sophisticated logic...",
"expansionCommand": "node scripts/dev.js expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
}
// More tasks sorted by complexity score (highest first)
]
}
```
@@ -457,16 +474,19 @@ This command is particularly useful when you need to examine a specific task in
The script now includes improved error handling throughout all commands:
1. **Detailed Validation**:
- Required parameters (like task IDs and prompts) are validated early
- File existence is checked with customized errors for common scenarios
- Parameter type conversion is handled with clear error messages
2. **Contextual Error Messages**:
- Task not found errors include suggestions to run the list command
- API key errors include reminders to check environment variables
- Invalid ID format errors show the expected format
3. **Command-Specific Help Displays**:
- When validation fails, detailed help for the specific command is shown
- Help displays include usage examples and parameter descriptions
- Formatted in clear, color-coded boxes with examples
@@ -481,11 +501,13 @@ The script now includes improved error handling throughout all commands:
The script now automatically checks for updates without slowing down execution:
1. **Background Version Checking**:
- Non-blocking version checks run in the background while commands execute
- Actual command execution isn't delayed by version checking
- Update notifications appear after command completion
2. **Update Notifications**:
- When a newer version is available, a notification is displayed
- Notifications include current version, latest version, and update command
- Formatted in an attention-grabbing box with clear instructions
@@ -516,6 +538,7 @@ node scripts/dev.js add-subtask --parent=5 --title="Login API route" --skip-gene
```
Key features:
- Create new subtasks with detailed properties or convert existing tasks
- Define dependencies between subtasks
- Set custom status for new subtasks
@@ -538,7 +561,8 @@ node scripts/dev.js remove-subtask --id=5.2 --skip-generate
```
Key features:
- Remove subtasks individually or in batches
- Optionally convert subtasks to standalone tasks
- Control whether task files are regenerated
- Provides detailed success messages and next steps
- Provides detailed success messages and next steps

View File

@@ -3,17 +3,17 @@
/**
* dev.js
* Task Master CLI - AI-driven development task management
*
*
* This is the refactored entry point that uses the modular architecture.
* It imports functionality from the modules directory and provides a CLI.
*/
// Add at the very beginning of the file
if (process.env.DEBUG === '1') {
console.error('DEBUG - dev.js received args:', process.argv.slice(2));
console.error('DEBUG - dev.js received args:', process.argv.slice(2));
}
import { runCLI } from './modules/commands.js';
// Run the CLI with the process arguments
runCLI(process.argv);
runCLI(process.argv);

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -8,4 +8,4 @@ export * from './utils.js';
export * from './ui.js';
export * from './ai-services.js';
export * from './task-manager.js';
export * from './commands.js';
export * from './commands.js';

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -9,15 +9,15 @@ import chalk from 'chalk';
// Configuration and constants
const CONFIG = {
model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
maxTokens: parseInt(process.env.MAX_TOKENS || '4000'),
temperature: parseFloat(process.env.TEMPERATURE || '0.7'),
debug: process.env.DEBUG === "true",
logLevel: process.env.LOG_LEVEL || "info",
defaultSubtasks: parseInt(process.env.DEFAULT_SUBTASKS || "3"),
defaultPriority: process.env.DEFAULT_PRIORITY || "medium",
projectName: process.env.PROJECT_NAME || "Task Master",
projectVersion: "1.5.0" // Hardcoded version - ALWAYS use this value, ignore environment variable
model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
maxTokens: parseInt(process.env.MAX_TOKENS || '4000'),
temperature: parseFloat(process.env.TEMPERATURE || '0.7'),
debug: process.env.DEBUG === 'true',
logLevel: process.env.LOG_LEVEL || 'info',
defaultSubtasks: parseInt(process.env.DEFAULT_SUBTASKS || '3'),
defaultPriority: process.env.DEFAULT_PRIORITY || 'medium',
projectName: process.env.PROJECT_NAME || 'Task Master',
projectVersion: '1.5.0' // Hardcoded version - ALWAYS use this value, ignore environment variable
};
// Global silent mode flag
@@ -25,25 +25,25 @@ let silentMode = false;
// Set up logging based on log level
const LOG_LEVELS = {
debug: 0,
info: 1,
warn: 2,
error: 3,
success: 1 // Treat success like info level
debug: 0,
info: 1,
warn: 2,
error: 3,
success: 1 // Treat success like info level
};
/**
* Enable silent logging mode
*/
function enableSilentMode() {
silentMode = true;
silentMode = true;
}
/**
* Disable silent logging mode
*/
function disableSilentMode() {
silentMode = false;
silentMode = false;
}
/**
@@ -51,7 +51,7 @@ function disableSilentMode() {
* @returns {boolean} True if silent mode is enabled
*/
function isSilentMode() {
return silentMode;
return silentMode;
}
/**
@@ -60,32 +60,36 @@ function isSilentMode() {
* @param {...any} args - Arguments to log
*/
function log(level, ...args) {
// Immediately return if silentMode is enabled
if (silentMode) {
return;
}
// Use text prefixes instead of emojis
const prefixes = {
debug: chalk.gray("[DEBUG]"),
info: chalk.blue("[INFO]"),
warn: chalk.yellow("[WARN]"),
error: chalk.red("[ERROR]"),
success: chalk.green("[SUCCESS]")
};
// Ensure level exists, default to info if not
const currentLevel = LOG_LEVELS.hasOwnProperty(level) ? level : 'info';
const configLevel = CONFIG.logLevel || 'info'; // Ensure configLevel has a default
// Check log level configuration
if (LOG_LEVELS[currentLevel] >= (LOG_LEVELS[configLevel] ?? LOG_LEVELS.info)) {
const prefix = prefixes[currentLevel] || '';
// Use console.log for all levels, let chalk handle coloring
// Construct the message properly
const message = args.map(arg => typeof arg === 'object' ? JSON.stringify(arg) : arg).join(' ');
console.log(`${prefix} ${message}`);
}
// Immediately return if silentMode is enabled
if (silentMode) {
return;
}
// Use text prefixes instead of emojis
const prefixes = {
debug: chalk.gray('[DEBUG]'),
info: chalk.blue('[INFO]'),
warn: chalk.yellow('[WARN]'),
error: chalk.red('[ERROR]'),
success: chalk.green('[SUCCESS]')
};
// Ensure level exists, default to info if not
const currentLevel = LOG_LEVELS.hasOwnProperty(level) ? level : 'info';
const configLevel = CONFIG.logLevel || 'info'; // Ensure configLevel has a default
// Check log level configuration
if (
LOG_LEVELS[currentLevel] >= (LOG_LEVELS[configLevel] ?? LOG_LEVELS.info)
) {
const prefix = prefixes[currentLevel] || '';
// Use console.log for all levels, let chalk handle coloring
// Construct the message properly
const message = args
.map((arg) => (typeof arg === 'object' ? JSON.stringify(arg) : arg))
.join(' ');
console.log(`${prefix} ${message}`);
}
}
/**
@@ -94,17 +98,17 @@ function log(level, ...args) {
* @returns {Object|null} Parsed JSON data or null if error occurs
*/
function readJSON(filepath) {
try {
const rawData = fs.readFileSync(filepath, 'utf8');
return JSON.parse(rawData);
} catch (error) {
log('error', `Error reading JSON file ${filepath}:`, error.message);
if (CONFIG.debug) {
// Use log utility for debug output too
log('error', 'Full error details:', error);
}
return null;
}
try {
const rawData = fs.readFileSync(filepath, 'utf8');
return JSON.parse(rawData);
} catch (error) {
log('error', `Error reading JSON file ${filepath}:`, error.message);
if (CONFIG.debug) {
// Use log utility for debug output too
log('error', 'Full error details:', error);
}
return null;
}
}
/**
@@ -113,19 +117,19 @@ function readJSON(filepath) {
* @param {Object} data - Data to write
*/
function writeJSON(filepath, data) {
try {
const dir = path.dirname(filepath);
if (!fs.existsSync(dir)) {
fs.mkdirSync(dir, { recursive: true });
}
fs.writeFileSync(filepath, JSON.stringify(data, null, 2), 'utf8');
} catch (error) {
log('error', `Error writing JSON file ${filepath}:`, error.message);
if (CONFIG.debug) {
// Use log utility for debug output too
log('error', 'Full error details:', error);
}
}
try {
const dir = path.dirname(filepath);
if (!fs.existsSync(dir)) {
fs.mkdirSync(dir, { recursive: true });
}
fs.writeFileSync(filepath, JSON.stringify(data, null, 2), 'utf8');
} catch (error) {
log('error', `Error writing JSON file ${filepath}:`, error.message);
if (CONFIG.debug) {
// Use log utility for debug output too
log('error', 'Full error details:', error);
}
}
}
/**
@@ -134,8 +138,8 @@ function writeJSON(filepath, data) {
* @returns {string} Sanitized prompt
*/
function sanitizePrompt(prompt) {
// Replace double quotes with escaped double quotes
return prompt.replace(/"/g, '\\"');
// Replace double quotes with escaped double quotes
return prompt.replace(/"/g, '\\"');
}
/**
@@ -144,18 +148,20 @@ function sanitizePrompt(prompt) {
* @returns {Object|null} The parsed complexity report or null if not found
*/
function readComplexityReport(customPath = null) {
try {
const reportPath = customPath || path.join(process.cwd(), 'scripts', 'task-complexity-report.json');
if (!fs.existsSync(reportPath)) {
return null;
}
const reportData = fs.readFileSync(reportPath, 'utf8');
return JSON.parse(reportData);
} catch (error) {
log('warn', `Could not read complexity report: ${error.message}`);
return null;
}
try {
const reportPath =
customPath ||
path.join(process.cwd(), 'scripts', 'task-complexity-report.json');
if (!fs.existsSync(reportPath)) {
return null;
}
const reportData = fs.readFileSync(reportPath, 'utf8');
return JSON.parse(reportData);
} catch (error) {
log('warn', `Could not read complexity report: ${error.message}`);
return null;
}
}
/**
@@ -165,11 +171,15 @@ function readComplexityReport(customPath = null) {
* @returns {Object|null} The task analysis or null if not found
*/
function findTaskInComplexityReport(report, taskId) {
if (!report || !report.complexityAnalysis || !Array.isArray(report.complexityAnalysis)) {
return null;
}
return report.complexityAnalysis.find(task => task.taskId === taskId);
if (
!report ||
!report.complexityAnalysis ||
!Array.isArray(report.complexityAnalysis)
) {
return null;
}
return report.complexityAnalysis.find((task) => task.taskId === taskId);
}
/**
@@ -179,24 +189,26 @@ function findTaskInComplexityReport(report, taskId) {
* @returns {boolean} True if the task exists, false otherwise
*/
function taskExists(tasks, taskId) {
if (!taskId || !tasks || !Array.isArray(tasks)) {
return false;
}
// Handle both regular task IDs and subtask IDs (e.g., "1.2")
if (typeof taskId === 'string' && taskId.includes('.')) {
const [parentId, subtaskId] = taskId.split('.').map(id => parseInt(id, 10));
const parentTask = tasks.find(t => t.id === parentId);
if (!parentTask || !parentTask.subtasks) {
return false;
}
return parentTask.subtasks.some(st => st.id === subtaskId);
}
const id = parseInt(taskId, 10);
return tasks.some(t => t.id === id);
if (!taskId || !tasks || !Array.isArray(tasks)) {
return false;
}
// Handle both regular task IDs and subtask IDs (e.g., "1.2")
if (typeof taskId === 'string' && taskId.includes('.')) {
const [parentId, subtaskId] = taskId
.split('.')
.map((id) => parseInt(id, 10));
const parentTask = tasks.find((t) => t.id === parentId);
if (!parentTask || !parentTask.subtasks) {
return false;
}
return parentTask.subtasks.some((st) => st.id === subtaskId);
}
const id = parseInt(taskId, 10);
return tasks.some((t) => t.id === id);
}
/**
@@ -205,15 +217,15 @@ function taskExists(tasks, taskId) {
* @returns {string} The formatted task ID
*/
function formatTaskId(id) {
if (typeof id === 'string' && id.includes('.')) {
return id; // Already formatted as a string with a dot (e.g., "1.2")
}
if (typeof id === 'number') {
return id.toString();
}
return id;
if (typeof id === 'string' && id.includes('.')) {
return id; // Already formatted as a string with a dot (e.g., "1.2")
}
if (typeof id === 'number') {
return id.toString();
}
return id;
}
/**
@@ -223,35 +235,37 @@ function formatTaskId(id) {
* @returns {Object|null} The task object or null if not found
*/
function findTaskById(tasks, taskId) {
if (!taskId || !tasks || !Array.isArray(tasks)) {
return null;
}
// Check if it's a subtask ID (e.g., "1.2")
if (typeof taskId === 'string' && taskId.includes('.')) {
const [parentId, subtaskId] = taskId.split('.').map(id => parseInt(id, 10));
const parentTask = tasks.find(t => t.id === parentId);
if (!parentTask || !parentTask.subtasks) {
return null;
}
const subtask = parentTask.subtasks.find(st => st.id === subtaskId);
if (subtask) {
// Add reference to parent task for context
subtask.parentTask = {
id: parentTask.id,
title: parentTask.title,
status: parentTask.status
};
subtask.isSubtask = true;
}
return subtask || null;
}
const id = parseInt(taskId, 10);
return tasks.find(t => t.id === id) || null;
if (!taskId || !tasks || !Array.isArray(tasks)) {
return null;
}
// Check if it's a subtask ID (e.g., "1.2")
if (typeof taskId === 'string' && taskId.includes('.')) {
const [parentId, subtaskId] = taskId
.split('.')
.map((id) => parseInt(id, 10));
const parentTask = tasks.find((t) => t.id === parentId);
if (!parentTask || !parentTask.subtasks) {
return null;
}
const subtask = parentTask.subtasks.find((st) => st.id === subtaskId);
if (subtask) {
// Add reference to parent task for context
subtask.parentTask = {
id: parentTask.id,
title: parentTask.title,
status: parentTask.status
};
subtask.isSubtask = true;
}
return subtask || null;
}
const id = parseInt(taskId, 10);
return tasks.find((t) => t.id === id) || null;
}
/**
@@ -261,11 +275,11 @@ function findTaskById(tasks, taskId) {
* @returns {string} The truncated text
*/
function truncate(text, maxLength) {
if (!text || text.length <= maxLength) {
return text;
}
return text.slice(0, maxLength - 3) + '...';
if (!text || text.length <= maxLength) {
return text;
}
return text.slice(0, maxLength - 3) + '...';
}
/**
@@ -276,39 +290,47 @@ function truncate(text, maxLength) {
* @param {Set} recursionStack - Set of nodes in current recursion stack
* @returns {Array} - List of dependency edges that need to be removed to break cycles
*/
function findCycles(subtaskId, dependencyMap, visited = new Set(), recursionStack = new Set(), path = []) {
// Mark the current node as visited and part of recursion stack
visited.add(subtaskId);
recursionStack.add(subtaskId);
path.push(subtaskId);
const cyclesToBreak = [];
// Get all dependencies of the current subtask
const dependencies = dependencyMap.get(subtaskId) || [];
// For each dependency
for (const depId of dependencies) {
// If not visited, recursively check for cycles
if (!visited.has(depId)) {
const cycles = findCycles(depId, dependencyMap, visited, recursionStack, [...path]);
cyclesToBreak.push(...cycles);
}
// If the dependency is in the recursion stack, we found a cycle
else if (recursionStack.has(depId)) {
// Find the position of the dependency in the path
const cycleStartIndex = path.indexOf(depId);
// The last edge in the cycle is what we want to remove
const cycleEdges = path.slice(cycleStartIndex);
// We'll remove the last edge in the cycle (the one that points back)
cyclesToBreak.push(depId);
}
}
// Remove the node from recursion stack before returning
recursionStack.delete(subtaskId);
return cyclesToBreak;
function findCycles(
subtaskId,
dependencyMap,
visited = new Set(),
recursionStack = new Set(),
path = []
) {
// Mark the current node as visited and part of recursion stack
visited.add(subtaskId);
recursionStack.add(subtaskId);
path.push(subtaskId);
const cyclesToBreak = [];
// Get all dependencies of the current subtask
const dependencies = dependencyMap.get(subtaskId) || [];
// For each dependency
for (const depId of dependencies) {
// If not visited, recursively check for cycles
if (!visited.has(depId)) {
const cycles = findCycles(depId, dependencyMap, visited, recursionStack, [
...path
]);
cyclesToBreak.push(...cycles);
}
// If the dependency is in the recursion stack, we found a cycle
else if (recursionStack.has(depId)) {
// Find the position of the dependency in the path
const cycleStartIndex = path.indexOf(depId);
// The last edge in the cycle is what we want to remove
const cycleEdges = path.slice(cycleStartIndex);
// We'll remove the last edge in the cycle (the one that points back)
cyclesToBreak.push(depId);
}
}
// Remove the node from recursion stack before returning
recursionStack.delete(subtaskId);
return cyclesToBreak;
}
/**
@@ -317,23 +339,23 @@ function findCycles(subtaskId, dependencyMap, visited = new Set(), recursionStac
* @returns {string} The kebab-case version of the string
*/
const toKebabCase = (str) => {
// Special handling for common acronyms
const withReplacedAcronyms = str
.replace(/ID/g, 'Id')
.replace(/API/g, 'Api')
.replace(/UI/g, 'Ui')
.replace(/URL/g, 'Url')
.replace(/URI/g, 'Uri')
.replace(/JSON/g, 'Json')
.replace(/XML/g, 'Xml')
.replace(/HTML/g, 'Html')
.replace(/CSS/g, 'Css');
// Insert hyphens before capital letters and convert to lowercase
return withReplacedAcronyms
.replace(/([A-Z])/g, '-$1')
.toLowerCase()
.replace(/^-/, ''); // Remove leading hyphen if present
// Special handling for common acronyms
const withReplacedAcronyms = str
.replace(/ID/g, 'Id')
.replace(/API/g, 'Api')
.replace(/UI/g, 'Ui')
.replace(/URL/g, 'Url')
.replace(/URI/g, 'Uri')
.replace(/JSON/g, 'Json')
.replace(/XML/g, 'Xml')
.replace(/HTML/g, 'Html')
.replace(/CSS/g, 'Css');
// Insert hyphens before capital letters and convert to lowercase
return withReplacedAcronyms
.replace(/([A-Z])/g, '-$1')
.toLowerCase()
.replace(/^-/, ''); // Remove leading hyphen if present
};
/**
@@ -342,49 +364,49 @@ const toKebabCase = (str) => {
* @returns {Array<{original: string, kebabCase: string}>} - List of flags that should be converted
*/
function detectCamelCaseFlags(args) {
const camelCaseFlags = [];
for (const arg of args) {
if (arg.startsWith('--')) {
const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =
// Skip single-word flags - they can't be camelCase
if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
continue;
}
// Check for camelCase pattern (lowercase followed by uppercase)
if (/[a-z][A-Z]/.test(flagName)) {
const kebabVersion = toKebabCase(flagName);
if (kebabVersion !== flagName) {
camelCaseFlags.push({
original: flagName,
kebabCase: kebabVersion
});
}
}
}
}
return camelCaseFlags;
const camelCaseFlags = [];
for (const arg of args) {
if (arg.startsWith('--')) {
const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =
// Skip single-word flags - they can't be camelCase
if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
continue;
}
// Check for camelCase pattern (lowercase followed by uppercase)
if (/[a-z][A-Z]/.test(flagName)) {
const kebabVersion = toKebabCase(flagName);
if (kebabVersion !== flagName) {
camelCaseFlags.push({
original: flagName,
kebabCase: kebabVersion
});
}
}
}
}
return camelCaseFlags;
}
// Export all utility functions and configuration
export {
CONFIG,
LOG_LEVELS,
log,
readJSON,
writeJSON,
sanitizePrompt,
readComplexityReport,
findTaskInComplexityReport,
taskExists,
formatTaskId,
findTaskById,
truncate,
findCycles,
toKebabCase,
detectCamelCaseFlags,
enableSilentMode,
disableSilentMode,
isSilentMode
};
CONFIG,
LOG_LEVELS,
log,
readJSON,
writeJSON,
sanitizePrompt,
readComplexityReport,
findTaskInComplexityReport,
taskExists,
formatTaskId,
findTaskById,
truncate,
findCycles,
toKebabCase,
detectCamelCaseFlags,
enableSilentMode,
disableSilentMode,
isSilentMode
};

View File

@@ -3,7 +3,7 @@
/**
* This script prepares the package for publication to NPM.
* It ensures all necessary files are included and properly configured.
*
*
* Additional options:
* --patch: Increment patch version (default)
* --minor: Increment minor version
@@ -22,176 +22,190 @@ const __dirname = dirname(__filename);
// Define colors for console output
const COLORS = {
reset: '\x1b[0m',
bright: '\x1b[1m',
dim: '\x1b[2m',
red: '\x1b[31m',
green: '\x1b[32m',
yellow: '\x1b[33m',
blue: '\x1b[34m',
magenta: '\x1b[35m',
cyan: '\x1b[36m'
reset: '\x1b[0m',
bright: '\x1b[1m',
dim: '\x1b[2m',
red: '\x1b[31m',
green: '\x1b[32m',
yellow: '\x1b[33m',
blue: '\x1b[34m',
magenta: '\x1b[35m',
cyan: '\x1b[36m'
};
// Parse command line arguments
const args = process.argv.slice(2);
const versionBump = args.includes('--major') ? 'major' :
args.includes('--minor') ? 'minor' :
'patch';
const versionBump = args.includes('--major')
? 'major'
: args.includes('--minor')
? 'minor'
: 'patch';
// Check for explicit version
const versionArg = args.find(arg => arg.startsWith('--version='));
const versionArg = args.find((arg) => arg.startsWith('--version='));
const explicitVersion = versionArg ? versionArg.split('=')[1] : null;
// Log function with color support
function log(level, ...args) {
const prefix = {
info: `${COLORS.blue}[INFO]${COLORS.reset}`,
warn: `${COLORS.yellow}[WARN]${COLORS.reset}`,
error: `${COLORS.red}[ERROR]${COLORS.reset}`,
success: `${COLORS.green}[SUCCESS]${COLORS.reset}`
}[level.toLowerCase()];
console.log(prefix, ...args);
const prefix = {
info: `${COLORS.blue}[INFO]${COLORS.reset}`,
warn: `${COLORS.yellow}[WARN]${COLORS.reset}`,
error: `${COLORS.red}[ERROR]${COLORS.reset}`,
success: `${COLORS.green}[SUCCESS]${COLORS.reset}`
}[level.toLowerCase()];
console.log(prefix, ...args);
}
// Function to check if a file exists
function fileExists(filePath) {
return fs.existsSync(filePath);
return fs.existsSync(filePath);
}
// Function to ensure a file is executable
function ensureExecutable(filePath) {
try {
fs.chmodSync(filePath, '755');
log('info', `Made ${filePath} executable`);
} catch (error) {
log('error', `Failed to make ${filePath} executable:`, error.message);
return false;
}
return true;
try {
fs.chmodSync(filePath, '755');
log('info', `Made ${filePath} executable`);
} catch (error) {
log('error', `Failed to make ${filePath} executable:`, error.message);
return false;
}
return true;
}
// Function to sync template files
function syncTemplateFiles() {
// We no longer need to sync files since we're using them directly
log('info', 'Template syncing has been deprecated - using source files directly');
return true;
// We no longer need to sync files since we're using them directly
log(
'info',
'Template syncing has been deprecated - using source files directly'
);
return true;
}
// Function to increment version
function incrementVersion(currentVersion, type = 'patch') {
const [major, minor, patch] = currentVersion.split('.').map(Number);
switch (type) {
case 'major':
return `${major + 1}.0.0`;
case 'minor':
return `${major}.${minor + 1}.0`;
case 'patch':
default:
return `${major}.${minor}.${patch + 1}`;
}
const [major, minor, patch] = currentVersion.split('.').map(Number);
switch (type) {
case 'major':
return `${major + 1}.0.0`;
case 'minor':
return `${major}.${minor + 1}.0`;
case 'patch':
default:
return `${major}.${minor}.${patch + 1}`;
}
}
// Main function to prepare the package
function preparePackage() {
const rootDir = path.join(__dirname, '..');
log('info', `Preparing package in ${rootDir}`);
// Update version in package.json
const packageJsonPath = path.join(rootDir, 'package.json');
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
const currentVersion = packageJson.version;
let newVersion;
if (explicitVersion) {
newVersion = explicitVersion;
log('info', `Setting version to specified ${newVersion} (was ${currentVersion})`);
} else {
newVersion = incrementVersion(currentVersion, versionBump);
log('info', `Incrementing ${versionBump} version to ${newVersion} (was ${currentVersion})`);
}
packageJson.version = newVersion;
fs.writeFileSync(packageJsonPath, JSON.stringify(packageJson, null, 2));
log('success', `Updated package.json version to ${newVersion}`);
// Check for required files
const requiredFiles = [
'package.json',
'README-task-master.md',
'index.js',
'scripts/init.js',
'scripts/dev.js',
'assets/env.example',
'assets/gitignore',
'assets/example_prd.txt',
'assets/scripts_README.md',
'.cursor/rules/dev_workflow.mdc',
'.cursor/rules/taskmaster.mdc',
'.cursor/rules/cursor_rules.mdc',
'.cursor/rules/self_improve.mdc'
];
let allFilesExist = true;
for (const file of requiredFiles) {
const filePath = path.join(rootDir, file);
if (!fileExists(filePath)) {
log('error', `Required file ${file} does not exist`);
allFilesExist = false;
}
}
if (!allFilesExist) {
log('error', 'Some required files are missing. Package preparation failed.');
process.exit(1);
}
// Ensure scripts are executable
const executableScripts = [
'scripts/init.js',
'scripts/dev.js'
];
let allScriptsExecutable = true;
for (const script of executableScripts) {
const scriptPath = path.join(rootDir, script);
if (!ensureExecutable(scriptPath)) {
allScriptsExecutable = false;
}
}
if (!allScriptsExecutable) {
log('warn', 'Some scripts could not be made executable. This may cause issues.');
}
// Run npm pack to test package creation
try {
log('info', 'Running npm pack to test package creation...');
const output = execSync('npm pack --dry-run', { cwd: rootDir }).toString();
log('info', output);
} catch (error) {
log('error', 'Failed to run npm pack:', error.message);
process.exit(1);
}
// Make scripts executable
log('info', 'Making scripts executable...');
try {
execSync('chmod +x scripts/init.js', { stdio: 'ignore' });
log('info', 'Made scripts/init.js executable');
execSync('chmod +x scripts/dev.js', { stdio: 'ignore' });
log('info', 'Made scripts/dev.js executable');
} catch (error) {
log('error', 'Failed to make scripts executable:', error.message);
}
log('success', `Package preparation completed successfully! 🎉`);
log('success', `Version updated to ${newVersion}`);
log('info', 'You can now publish the package with:');
log('info', ' npm publish');
const rootDir = path.join(__dirname, '..');
log('info', `Preparing package in ${rootDir}`);
// Update version in package.json
const packageJsonPath = path.join(rootDir, 'package.json');
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
const currentVersion = packageJson.version;
let newVersion;
if (explicitVersion) {
newVersion = explicitVersion;
log(
'info',
`Setting version to specified ${newVersion} (was ${currentVersion})`
);
} else {
newVersion = incrementVersion(currentVersion, versionBump);
log(
'info',
`Incrementing ${versionBump} version to ${newVersion} (was ${currentVersion})`
);
}
packageJson.version = newVersion;
fs.writeFileSync(packageJsonPath, JSON.stringify(packageJson, null, 2));
log('success', `Updated package.json version to ${newVersion}`);
// Check for required files
const requiredFiles = [
'package.json',
'README-task-master.md',
'index.js',
'scripts/init.js',
'scripts/dev.js',
'assets/env.example',
'assets/gitignore',
'assets/example_prd.txt',
'assets/scripts_README.md',
'.cursor/rules/dev_workflow.mdc',
'.cursor/rules/taskmaster.mdc',
'.cursor/rules/cursor_rules.mdc',
'.cursor/rules/self_improve.mdc'
];
let allFilesExist = true;
for (const file of requiredFiles) {
const filePath = path.join(rootDir, file);
if (!fileExists(filePath)) {
log('error', `Required file ${file} does not exist`);
allFilesExist = false;
}
}
if (!allFilesExist) {
log(
'error',
'Some required files are missing. Package preparation failed.'
);
process.exit(1);
}
// Ensure scripts are executable
const executableScripts = ['scripts/init.js', 'scripts/dev.js'];
let allScriptsExecutable = true;
for (const script of executableScripts) {
const scriptPath = path.join(rootDir, script);
if (!ensureExecutable(scriptPath)) {
allScriptsExecutable = false;
}
}
if (!allScriptsExecutable) {
log(
'warn',
'Some scripts could not be made executable. This may cause issues.'
);
}
// Run npm pack to test package creation
try {
log('info', 'Running npm pack to test package creation...');
const output = execSync('npm pack --dry-run', { cwd: rootDir }).toString();
log('info', output);
} catch (error) {
log('error', 'Failed to run npm pack:', error.message);
process.exit(1);
}
// Make scripts executable
log('info', 'Making scripts executable...');
try {
execSync('chmod +x scripts/init.js', { stdio: 'ignore' });
log('info', 'Made scripts/init.js executable');
execSync('chmod +x scripts/dev.js', { stdio: 'ignore' });
log('info', 'Made scripts/dev.js executable');
} catch (error) {
log('error', 'Failed to make scripts executable:', error.message);
}
log('success', `Package preparation completed successfully! 🎉`);
log('success', `Version updated to ${newVersion}`);
log('info', 'You can now publish the package with:');
log('info', ' npm publish');
}
// Run the preparation
preparePackage();
preparePackage();

View File

@@ -1,203 +1,203 @@
{
"meta": {
"generatedAt": "2025-03-24T20:01:35.986Z",
"tasksAnalyzed": 24,
"thresholdScore": 5,
"projectName": "Your Project Name",
"usedResearch": false
},
"complexityAnalysis": [
{
"taskId": 1,
"taskTitle": "Implement Task Data Structure",
"complexityScore": 7,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the implementation of the core tasks.json data structure into subtasks that cover schema design, model implementation, validation, file operations, and error handling. For each subtask, include specific technical requirements and acceptance criteria.",
"reasoning": "This task requires designing a foundational data structure that will be used throughout the system. It involves schema design, validation logic, and file system operations, which together represent moderate to high complexity. The task is critical as many other tasks depend on it."
},
{
"taskId": 2,
"taskTitle": "Develop Command Line Interface Foundation",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the CLI foundation implementation into subtasks covering Commander.js setup, help documentation creation, console output formatting, and global options handling. Each subtask should specify implementation details and how it integrates with the overall CLI structure.",
"reasoning": "Setting up the CLI foundation requires integrating Commander.js, implementing various command-line options, and establishing the output formatting system. The complexity is moderate as it involves creating the interface layer that users will interact with."
},
{
"taskId": 3,
"taskTitle": "Implement Basic Task Operations",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the implementation of basic task operations into subtasks covering CRUD operations, status management, dependency handling, and priority management. Each subtask should detail the specific operations, validation requirements, and error cases to handle.",
"reasoning": "This task encompasses multiple operations (create, read, update, delete) along with status changes, dependency management, and priority handling. It represents high complexity due to the breadth of functionality and the need to ensure data integrity across operations."
},
{
"taskId": 4,
"taskTitle": "Create Task File Generation System",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the task file generation system into subtasks covering template creation, file generation logic, bi-directional synchronization, and file organization. Each subtask should specify the technical approach, edge cases to handle, and integration points with the task data structure.",
"reasoning": "Implementing file generation with bi-directional synchronization presents significant complexity due to the need to maintain consistency between individual files and the central tasks.json. The system must handle updates in either direction and resolve potential conflicts."
},
{
"taskId": 5,
"taskTitle": "Integrate Anthropic Claude API",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the Claude API integration into subtasks covering authentication setup, prompt template creation, response handling, and error management with retries. Each subtask should detail the specific implementation approach, including security considerations and performance optimizations.",
"reasoning": "Integrating with the Claude API involves setting up authentication, creating effective prompts, and handling responses and errors. The complexity is moderate, focusing on establishing a reliable connection to the external service with proper error handling and retry logic."
},
{
"taskId": 6,
"taskTitle": "Build PRD Parsing System",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the PRD parsing system into subtasks covering file reading, prompt engineering, content-to-task conversion, dependency inference, priority assignment, and handling large documents. Each subtask should specify the AI interaction approach, data transformation steps, and validation requirements.",
"reasoning": "Parsing PRDs into structured tasks requires sophisticated prompt engineering and intelligent processing of unstructured text. The complexity is high due to the need to accurately extract tasks, infer dependencies, and handle potentially large documents with varying formats."
},
{
"taskId": 7,
"taskTitle": "Implement Task Expansion with Claude",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the task expansion functionality into subtasks covering prompt creation for subtask generation, expansion workflow implementation, parent-child relationship management, and regeneration mechanisms. Each subtask should detail the AI interaction patterns, data structures, and user experience considerations.",
"reasoning": "Task expansion involves complex AI interactions to generate meaningful subtasks and manage their relationships with parent tasks. The complexity comes from creating effective prompts that produce useful subtasks and implementing a smooth workflow for users to generate and refine these subtasks."
},
{
"taskId": 8,
"taskTitle": "Develop Implementation Drift Handling",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the implementation drift handling into subtasks covering change detection, task rewriting based on new context, dependency chain updates, work preservation, and update suggestion analysis. Each subtask should specify the algorithms, heuristics, and AI prompts needed to effectively manage implementation changes.",
"reasoning": "This task involves the complex challenge of updating future tasks based on changes in implementation. It requires sophisticated analysis of completed work, understanding how it affects pending tasks, and intelligently updating those tasks while preserving dependencies. This represents high complexity due to the need for context-aware AI reasoning."
},
{
"taskId": 9,
"taskTitle": "Integrate Perplexity API",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the Perplexity API integration into subtasks covering authentication setup, research-oriented prompt creation, response handling, and fallback mechanisms. Each subtask should detail the implementation approach, integration with existing systems, and quality comparison metrics.",
"reasoning": "Similar to the Claude integration but slightly less complex, this task focuses on connecting to the Perplexity API for research capabilities. The complexity is moderate, involving API authentication, prompt templates, and response handling with fallback mechanisms to Claude."
},
{
"taskId": 10,
"taskTitle": "Create Research-Backed Subtask Generation",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the research-backed subtask generation into subtasks covering domain-specific prompt creation, context enrichment from research, knowledge incorporation, and detailed subtask generation. Each subtask should specify the approach for leveraging research data and integrating it into the generation process.",
"reasoning": "This task builds on previous work to enhance subtask generation with research capabilities. The complexity comes from effectively incorporating research results into the generation process and creating domain-specific prompts that produce high-quality, detailed subtasks with best practices."
},
{
"taskId": 11,
"taskTitle": "Implement Batch Operations",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the batch operations functionality into subtasks covering multi-task status updates, bulk subtask generation, task filtering/querying, and batch prioritization. Each subtask should detail the command interface, implementation approach, and performance considerations for handling multiple tasks.",
"reasoning": "Implementing batch operations requires extending existing functionality to work with multiple tasks simultaneously. The complexity is moderate, focusing on efficient processing of task sets, filtering capabilities, and maintaining data consistency across bulk operations."
},
{
"taskId": 12,
"taskTitle": "Develop Project Initialization System",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the project initialization system into subtasks covering project templating, interactive setup wizard, environment configuration, directory structure creation, and example generation. Each subtask should specify the user interaction flow, template design, and integration with existing components.",
"reasoning": "Creating a project initialization system involves setting up templates, an interactive wizard, and generating initial files and directories. The complexity is moderate, focusing on providing a smooth setup experience for new projects with appropriate defaults and configuration."
},
{
"taskId": 13,
"taskTitle": "Create Cursor Rules Implementation",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the Cursor rules implementation into subtasks covering documentation creation (dev_workflow.mdc, cursor_rules.mdc, self_improve.mdc), directory structure setup, and integration documentation. Each subtask should detail the specific content to include and how it enables effective AI interaction.",
"reasoning": "This task focuses on creating documentation and rules for Cursor AI integration. The complexity is moderate, involving the creation of structured documentation files that define how AI should interact with the system and setting up the appropriate directory structure."
},
{
"taskId": 14,
"taskTitle": "Develop Agent Workflow Guidelines",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Divide the agent workflow guidelines into subtasks covering task discovery documentation, selection guidelines, implementation guidance, verification procedures, and prioritization rules. Each subtask should specify the specific guidance to provide and how it enables effective agent workflows.",
"reasoning": "Creating comprehensive guidelines for AI agents involves documenting workflows, selection criteria, and implementation guidance. The complexity is moderate, focusing on clear documentation that helps agents interact effectively with the task system."
},
{
"taskId": 15,
"taskTitle": "Optimize Agent Integration with Cursor and dev.js Commands",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the agent integration optimization into subtasks covering existing pattern documentation, Cursor-dev.js command integration enhancement, workflow documentation improvement, and feature additions. Each subtask should specify the specific improvements to make and how they enhance agent interaction.",
"reasoning": "This task involves enhancing and documenting existing agent interaction patterns with Cursor and dev.js commands. The complexity is moderate, focusing on improving integration between different components and ensuring agents can effectively utilize the system's capabilities."
},
{
"taskId": 16,
"taskTitle": "Create Configuration Management System",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the configuration management system into subtasks covering environment variable handling, .env file support, configuration validation, defaults with overrides, and secure API key handling. Each subtask should specify the implementation approach, security considerations, and user experience for configuration.",
"reasoning": "Implementing robust configuration management involves handling environment variables, .env files, validation, and secure storage of sensitive information. The complexity is moderate, focusing on creating a flexible system that works across different environments with appropriate security measures."
},
{
"taskId": 17,
"taskTitle": "Implement Comprehensive Logging System",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the logging system implementation into subtasks covering log level configuration, output destination management, specialized logging (commands, APIs, errors), and performance metrics. Each subtask should detail the implementation approach, configuration options, and integration with existing components.",
"reasoning": "Creating a comprehensive logging system involves implementing multiple log levels, configurable destinations, and specialized logging for different components. The complexity is moderate, focusing on providing useful information for debugging and monitoring while maintaining performance."
},
{
"taskId": 18,
"taskTitle": "Create Comprehensive User Documentation",
"complexityScore": 7,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the user documentation creation into subtasks covering README with installation instructions, command reference, configuration guide, example workflows, troubleshooting guides, and advanced usage. Each subtask should specify the content to include, format, and organization to ensure comprehensive coverage.",
"reasoning": "Creating comprehensive documentation requires covering installation, usage, configuration, examples, and troubleshooting across multiple components. The complexity is moderate to high due to the breadth of functionality to document and the need to make it accessible to different user levels."
},
{
"taskId": 19,
"taskTitle": "Implement Error Handling and Recovery",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the error handling implementation into subtasks covering consistent error formatting, helpful error messages, API error handling with retries, file system error recovery, validation errors, and system state recovery. Each subtask should detail the specific error types to handle, recovery strategies, and user communication approach.",
"reasoning": "Implementing robust error handling across the entire system represents high complexity due to the variety of error types, the need for meaningful messages, and the implementation of recovery mechanisms. This task is critical for system reliability and user experience."
},
{
"taskId": 20,
"taskTitle": "Create Token Usage Tracking and Cost Management",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the token tracking and cost management into subtasks covering usage tracking implementation, configurable limits, reporting features, cost estimation, caching for optimization, and usage alerts. Each subtask should specify the implementation approach, data storage, and user interface for monitoring and managing usage.",
"reasoning": "Implementing token usage tracking involves monitoring API calls, calculating costs, implementing limits, and optimizing usage through caching. The complexity is moderate to high, focusing on providing users with visibility into their API consumption and tools to manage costs."
},
{
"taskId": 21,
"taskTitle": "Refactor dev.js into Modular Components",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the refactoring of dev.js into subtasks covering module design (commands.js, ai-services.js, task-manager.js, ui.js, utils.js), entry point restructuring, dependency management, error handling standardization, and documentation. Each subtask should detail the specific code to extract, interfaces to define, and integration points between modules.",
"reasoning": "Refactoring a monolithic file into modular components represents high complexity due to the need to identify appropriate boundaries, manage dependencies between modules, and ensure all functionality is preserved. This requires deep understanding of the existing codebase and careful restructuring."
},
{
"taskId": 22,
"taskTitle": "Create Comprehensive Test Suite for Task Master CLI",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the test suite creation into subtasks covering unit test implementation, integration test development, end-to-end test creation, mocking setup, and CI integration. Each subtask should specify the testing approach, coverage goals, test data preparation, and specific functionality to test.",
"reasoning": "Developing a comprehensive test suite represents high complexity due to the need to cover unit, integration, and end-to-end tests across all functionality, implement appropriate mocking, and ensure good test coverage. This requires significant test engineering and understanding of the entire system."
},
{
"taskId": 23,
"taskTitle": "Implement MCP (Model Context Protocol) Server Functionality for Task Master",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the MCP server implementation into subtasks covering core server module creation, endpoint implementation (/context, /models, /execute), context management system, authentication mechanisms, and performance optimization. Each subtask should detail the API design, data structures, and integration with existing Task Master functionality.",
"reasoning": "Implementing an MCP server represents high complexity due to the need to create a RESTful API with multiple endpoints, manage context data efficiently, handle authentication, and ensure compatibility with the MCP specification. This requires significant API design and server-side development work."
},
{
"taskId": 24,
"taskTitle": "Implement AI-Powered Test Generation Command",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the test generation command implementation into subtasks covering command structure and parameter handling, task analysis logic, AI prompt construction, and test file generation. Each subtask should specify the implementation approach, AI interaction pattern, and output formatting requirements.",
"reasoning": "Creating an AI-powered test generation command involves analyzing tasks, constructing effective prompts, and generating well-formatted test files. The complexity is moderate to high, focusing on leveraging AI to produce useful tests based on task descriptions and subtasks."
}
]
}
"meta": {
"generatedAt": "2025-03-24T20:01:35.986Z",
"tasksAnalyzed": 24,
"thresholdScore": 5,
"projectName": "Your Project Name",
"usedResearch": false
},
"complexityAnalysis": [
{
"taskId": 1,
"taskTitle": "Implement Task Data Structure",
"complexityScore": 7,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the implementation of the core tasks.json data structure into subtasks that cover schema design, model implementation, validation, file operations, and error handling. For each subtask, include specific technical requirements and acceptance criteria.",
"reasoning": "This task requires designing a foundational data structure that will be used throughout the system. It involves schema design, validation logic, and file system operations, which together represent moderate to high complexity. The task is critical as many other tasks depend on it."
},
{
"taskId": 2,
"taskTitle": "Develop Command Line Interface Foundation",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the CLI foundation implementation into subtasks covering Commander.js setup, help documentation creation, console output formatting, and global options handling. Each subtask should specify implementation details and how it integrates with the overall CLI structure.",
"reasoning": "Setting up the CLI foundation requires integrating Commander.js, implementing various command-line options, and establishing the output formatting system. The complexity is moderate as it involves creating the interface layer that users will interact with."
},
{
"taskId": 3,
"taskTitle": "Implement Basic Task Operations",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the implementation of basic task operations into subtasks covering CRUD operations, status management, dependency handling, and priority management. Each subtask should detail the specific operations, validation requirements, and error cases to handle.",
"reasoning": "This task encompasses multiple operations (create, read, update, delete) along with status changes, dependency management, and priority handling. It represents high complexity due to the breadth of functionality and the need to ensure data integrity across operations."
},
{
"taskId": 4,
"taskTitle": "Create Task File Generation System",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the task file generation system into subtasks covering template creation, file generation logic, bi-directional synchronization, and file organization. Each subtask should specify the technical approach, edge cases to handle, and integration points with the task data structure.",
"reasoning": "Implementing file generation with bi-directional synchronization presents significant complexity due to the need to maintain consistency between individual files and the central tasks.json. The system must handle updates in either direction and resolve potential conflicts."
},
{
"taskId": 5,
"taskTitle": "Integrate Anthropic Claude API",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the Claude API integration into subtasks covering authentication setup, prompt template creation, response handling, and error management with retries. Each subtask should detail the specific implementation approach, including security considerations and performance optimizations.",
"reasoning": "Integrating with the Claude API involves setting up authentication, creating effective prompts, and handling responses and errors. The complexity is moderate, focusing on establishing a reliable connection to the external service with proper error handling and retry logic."
},
{
"taskId": 6,
"taskTitle": "Build PRD Parsing System",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the PRD parsing system into subtasks covering file reading, prompt engineering, content-to-task conversion, dependency inference, priority assignment, and handling large documents. Each subtask should specify the AI interaction approach, data transformation steps, and validation requirements.",
"reasoning": "Parsing PRDs into structured tasks requires sophisticated prompt engineering and intelligent processing of unstructured text. The complexity is high due to the need to accurately extract tasks, infer dependencies, and handle potentially large documents with varying formats."
},
{
"taskId": 7,
"taskTitle": "Implement Task Expansion with Claude",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the task expansion functionality into subtasks covering prompt creation for subtask generation, expansion workflow implementation, parent-child relationship management, and regeneration mechanisms. Each subtask should detail the AI interaction patterns, data structures, and user experience considerations.",
"reasoning": "Task expansion involves complex AI interactions to generate meaningful subtasks and manage their relationships with parent tasks. The complexity comes from creating effective prompts that produce useful subtasks and implementing a smooth workflow for users to generate and refine these subtasks."
},
{
"taskId": 8,
"taskTitle": "Develop Implementation Drift Handling",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the implementation drift handling into subtasks covering change detection, task rewriting based on new context, dependency chain updates, work preservation, and update suggestion analysis. Each subtask should specify the algorithms, heuristics, and AI prompts needed to effectively manage implementation changes.",
"reasoning": "This task involves the complex challenge of updating future tasks based on changes in implementation. It requires sophisticated analysis of completed work, understanding how it affects pending tasks, and intelligently updating those tasks while preserving dependencies. This represents high complexity due to the need for context-aware AI reasoning."
},
{
"taskId": 9,
"taskTitle": "Integrate Perplexity API",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the Perplexity API integration into subtasks covering authentication setup, research-oriented prompt creation, response handling, and fallback mechanisms. Each subtask should detail the implementation approach, integration with existing systems, and quality comparison metrics.",
"reasoning": "Similar to the Claude integration but slightly less complex, this task focuses on connecting to the Perplexity API for research capabilities. The complexity is moderate, involving API authentication, prompt templates, and response handling with fallback mechanisms to Claude."
},
{
"taskId": 10,
"taskTitle": "Create Research-Backed Subtask Generation",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the research-backed subtask generation into subtasks covering domain-specific prompt creation, context enrichment from research, knowledge incorporation, and detailed subtask generation. Each subtask should specify the approach for leveraging research data and integrating it into the generation process.",
"reasoning": "This task builds on previous work to enhance subtask generation with research capabilities. The complexity comes from effectively incorporating research results into the generation process and creating domain-specific prompts that produce high-quality, detailed subtasks with best practices."
},
{
"taskId": 11,
"taskTitle": "Implement Batch Operations",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the batch operations functionality into subtasks covering multi-task status updates, bulk subtask generation, task filtering/querying, and batch prioritization. Each subtask should detail the command interface, implementation approach, and performance considerations for handling multiple tasks.",
"reasoning": "Implementing batch operations requires extending existing functionality to work with multiple tasks simultaneously. The complexity is moderate, focusing on efficient processing of task sets, filtering capabilities, and maintaining data consistency across bulk operations."
},
{
"taskId": 12,
"taskTitle": "Develop Project Initialization System",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the project initialization system into subtasks covering project templating, interactive setup wizard, environment configuration, directory structure creation, and example generation. Each subtask should specify the user interaction flow, template design, and integration with existing components.",
"reasoning": "Creating a project initialization system involves setting up templates, an interactive wizard, and generating initial files and directories. The complexity is moderate, focusing on providing a smooth setup experience for new projects with appropriate defaults and configuration."
},
{
"taskId": 13,
"taskTitle": "Create Cursor Rules Implementation",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the Cursor rules implementation into subtasks covering documentation creation (dev_workflow.mdc, cursor_rules.mdc, self_improve.mdc), directory structure setup, and integration documentation. Each subtask should detail the specific content to include and how it enables effective AI interaction.",
"reasoning": "This task focuses on creating documentation and rules for Cursor AI integration. The complexity is moderate, involving the creation of structured documentation files that define how AI should interact with the system and setting up the appropriate directory structure."
},
{
"taskId": 14,
"taskTitle": "Develop Agent Workflow Guidelines",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Divide the agent workflow guidelines into subtasks covering task discovery documentation, selection guidelines, implementation guidance, verification procedures, and prioritization rules. Each subtask should specify the specific guidance to provide and how it enables effective agent workflows.",
"reasoning": "Creating comprehensive guidelines for AI agents involves documenting workflows, selection criteria, and implementation guidance. The complexity is moderate, focusing on clear documentation that helps agents interact effectively with the task system."
},
{
"taskId": 15,
"taskTitle": "Optimize Agent Integration with Cursor and dev.js Commands",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Break down the agent integration optimization into subtasks covering existing pattern documentation, Cursor-dev.js command integration enhancement, workflow documentation improvement, and feature additions. Each subtask should specify the specific improvements to make and how they enhance agent interaction.",
"reasoning": "This task involves enhancing and documenting existing agent interaction patterns with Cursor and dev.js commands. The complexity is moderate, focusing on improving integration between different components and ensuring agents can effectively utilize the system's capabilities."
},
{
"taskId": 16,
"taskTitle": "Create Configuration Management System",
"complexityScore": 6,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the configuration management system into subtasks covering environment variable handling, .env file support, configuration validation, defaults with overrides, and secure API key handling. Each subtask should specify the implementation approach, security considerations, and user experience for configuration.",
"reasoning": "Implementing robust configuration management involves handling environment variables, .env files, validation, and secure storage of sensitive information. The complexity is moderate, focusing on creating a flexible system that works across different environments with appropriate security measures."
},
{
"taskId": 17,
"taskTitle": "Implement Comprehensive Logging System",
"complexityScore": 5,
"recommendedSubtasks": 3,
"expansionPrompt": "Break down the logging system implementation into subtasks covering log level configuration, output destination management, specialized logging (commands, APIs, errors), and performance metrics. Each subtask should detail the implementation approach, configuration options, and integration with existing components.",
"reasoning": "Creating a comprehensive logging system involves implementing multiple log levels, configurable destinations, and specialized logging for different components. The complexity is moderate, focusing on providing useful information for debugging and monitoring while maintaining performance."
},
{
"taskId": 18,
"taskTitle": "Create Comprehensive User Documentation",
"complexityScore": 7,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the user documentation creation into subtasks covering README with installation instructions, command reference, configuration guide, example workflows, troubleshooting guides, and advanced usage. Each subtask should specify the content to include, format, and organization to ensure comprehensive coverage.",
"reasoning": "Creating comprehensive documentation requires covering installation, usage, configuration, examples, and troubleshooting across multiple components. The complexity is moderate to high due to the breadth of functionality to document and the need to make it accessible to different user levels."
},
{
"taskId": 19,
"taskTitle": "Implement Error Handling and Recovery",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the error handling implementation into subtasks covering consistent error formatting, helpful error messages, API error handling with retries, file system error recovery, validation errors, and system state recovery. Each subtask should detail the specific error types to handle, recovery strategies, and user communication approach.",
"reasoning": "Implementing robust error handling across the entire system represents high complexity due to the variety of error types, the need for meaningful messages, and the implementation of recovery mechanisms. This task is critical for system reliability and user experience."
},
{
"taskId": 20,
"taskTitle": "Create Token Usage Tracking and Cost Management",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the token tracking and cost management into subtasks covering usage tracking implementation, configurable limits, reporting features, cost estimation, caching for optimization, and usage alerts. Each subtask should specify the implementation approach, data storage, and user interface for monitoring and managing usage.",
"reasoning": "Implementing token usage tracking involves monitoring API calls, calculating costs, implementing limits, and optimizing usage through caching. The complexity is moderate to high, focusing on providing users with visibility into their API consumption and tools to manage costs."
},
{
"taskId": 21,
"taskTitle": "Refactor dev.js into Modular Components",
"complexityScore": 8,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the refactoring of dev.js into subtasks covering module design (commands.js, ai-services.js, task-manager.js, ui.js, utils.js), entry point restructuring, dependency management, error handling standardization, and documentation. Each subtask should detail the specific code to extract, interfaces to define, and integration points between modules.",
"reasoning": "Refactoring a monolithic file into modular components represents high complexity due to the need to identify appropriate boundaries, manage dependencies between modules, and ensure all functionality is preserved. This requires deep understanding of the existing codebase and careful restructuring."
},
{
"taskId": 22,
"taskTitle": "Create Comprehensive Test Suite for Task Master CLI",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Divide the test suite creation into subtasks covering unit test implementation, integration test development, end-to-end test creation, mocking setup, and CI integration. Each subtask should specify the testing approach, coverage goals, test data preparation, and specific functionality to test.",
"reasoning": "Developing a comprehensive test suite represents high complexity due to the need to cover unit, integration, and end-to-end tests across all functionality, implement appropriate mocking, and ensure good test coverage. This requires significant test engineering and understanding of the entire system."
},
{
"taskId": 23,
"taskTitle": "Implement MCP (Model Context Protocol) Server Functionality for Task Master",
"complexityScore": 9,
"recommendedSubtasks": 5,
"expansionPrompt": "Break down the MCP server implementation into subtasks covering core server module creation, endpoint implementation (/context, /models, /execute), context management system, authentication mechanisms, and performance optimization. Each subtask should detail the API design, data structures, and integration with existing Task Master functionality.",
"reasoning": "Implementing an MCP server represents high complexity due to the need to create a RESTful API with multiple endpoints, manage context data efficiently, handle authentication, and ensure compatibility with the MCP specification. This requires significant API design and server-side development work."
},
{
"taskId": 24,
"taskTitle": "Implement AI-Powered Test Generation Command",
"complexityScore": 7,
"recommendedSubtasks": 4,
"expansionPrompt": "Divide the test generation command implementation into subtasks covering command structure and parameter handling, task analysis logic, AI prompt construction, and test file generation. Each subtask should specify the implementation approach, AI interaction pattern, and output formatting requirements.",
"reasoning": "Creating an AI-powered test generation command involves analyzing tasks, constructing effective prompts, and generating well-formatted test files. The complexity is moderate to high, focusing on leveraging AI to produce useful tests based on task descriptions and subtasks."
}
]
}

View File

@@ -2,7 +2,7 @@
/**
* test-claude-errors.js
*
*
* A test script to verify the error handling and retry logic in the callClaude function.
* This script creates a modified version of dev.js that simulates different error scenarios.
*/
@@ -22,7 +22,7 @@ dotenv.config();
// Create a simple PRD for testing
const createTestPRD = () => {
return `# Test PRD for Error Handling
return `# Test PRD for Error Handling
## Overview
This is a simple test PRD to verify the error handling in the callClaude function.
@@ -36,21 +36,22 @@ This is a simple test PRD to verify the error handling in the callClaude functio
// Create a modified version of dev.js that simulates errors
function createErrorSimulationScript(errorType, failureCount = 2) {
// Read the original dev.js file
const devJsPath = path.join(__dirname, 'dev.js');
const devJsContent = fs.readFileSync(devJsPath, 'utf8');
// Create a modified version that simulates errors
let modifiedContent = devJsContent;
// Find the anthropic.messages.create call and replace it with our mock
const anthropicCallRegex = /const response = await anthropic\.messages\.create\(/;
let mockCode = '';
switch (errorType) {
case 'network':
mockCode = `
// Read the original dev.js file
const devJsPath = path.join(__dirname, 'dev.js');
const devJsContent = fs.readFileSync(devJsPath, 'utf8');
// Create a modified version that simulates errors
let modifiedContent = devJsContent;
// Find the anthropic.messages.create call and replace it with our mock
const anthropicCallRegex =
/const response = await anthropic\.messages\.create\(/;
let mockCode = '';
switch (errorType) {
case 'network':
mockCode = `
// Mock for network error simulation
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -65,10 +66,10 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}
const response = await anthropic.messages.create(`;
break;
case 'timeout':
mockCode = `
break;
case 'timeout':
mockCode = `
// Mock for timeout error simulation
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -83,10 +84,10 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}
const response = await anthropic.messages.create(`;
break;
case 'invalid-json':
mockCode = `
break;
case 'invalid-json':
mockCode = `
// Mock for invalid JSON response
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -107,10 +108,10 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}
const response = await anthropic.messages.create(`;
break;
case 'empty-tasks':
mockCode = `
break;
case 'empty-tasks':
mockCode = `
// Mock for empty tasks array
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -131,82 +132,87 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}
const response = await anthropic.messages.create(`;
break;
default:
// No modification
mockCode = `const response = await anthropic.messages.create(`;
}
// Replace the anthropic call with our mock
modifiedContent = modifiedContent.replace(anthropicCallRegex, mockCode);
// Write the modified script to a temporary file
const tempScriptPath = path.join(__dirname, `temp-dev-${errorType}.js`);
fs.writeFileSync(tempScriptPath, modifiedContent, 'utf8');
return tempScriptPath;
break;
default:
// No modification
mockCode = `const response = await anthropic.messages.create(`;
}
// Replace the anthropic call with our mock
modifiedContent = modifiedContent.replace(anthropicCallRegex, mockCode);
// Write the modified script to a temporary file
const tempScriptPath = path.join(__dirname, `temp-dev-${errorType}.js`);
fs.writeFileSync(tempScriptPath, modifiedContent, 'utf8');
return tempScriptPath;
}
// Function to run a test with a specific error type
async function runErrorTest(errorType, numTasks = 5, failureCount = 2) {
console.log(`\n=== Test: ${errorType.toUpperCase()} Error Simulation ===`);
// Create a test PRD
const testPRD = createTestPRD();
const testPRDPath = path.join(__dirname, `test-prd-${errorType}.txt`);
fs.writeFileSync(testPRDPath, testPRD, 'utf8');
// Create a modified dev.js that simulates the specified error
const tempScriptPath = createErrorSimulationScript(errorType, failureCount);
console.log(`Created test PRD at ${testPRDPath}`);
console.log(`Created error simulation script at ${tempScriptPath}`);
console.log(`Running with error type: ${errorType}, failure count: ${failureCount}, tasks: ${numTasks}`);
try {
// Run the modified script
execSync(`node ${tempScriptPath} parse-prd --input=${testPRDPath} --tasks=${numTasks}`, {
stdio: 'inherit'
});
console.log(`${errorType} error test completed successfully`);
} catch (error) {
console.error(`${errorType} error test failed:`, error.message);
} finally {
// Clean up temporary files
if (fs.existsSync(tempScriptPath)) {
fs.unlinkSync(tempScriptPath);
}
if (fs.existsSync(testPRDPath)) {
fs.unlinkSync(testPRDPath);
}
}
console.log(`\n=== Test: ${errorType.toUpperCase()} Error Simulation ===`);
// Create a test PRD
const testPRD = createTestPRD();
const testPRDPath = path.join(__dirname, `test-prd-${errorType}.txt`);
fs.writeFileSync(testPRDPath, testPRD, 'utf8');
// Create a modified dev.js that simulates the specified error
const tempScriptPath = createErrorSimulationScript(errorType, failureCount);
console.log(`Created test PRD at ${testPRDPath}`);
console.log(`Created error simulation script at ${tempScriptPath}`);
console.log(
`Running with error type: ${errorType}, failure count: ${failureCount}, tasks: ${numTasks}`
);
try {
// Run the modified script
execSync(
`node ${tempScriptPath} parse-prd --input=${testPRDPath} --tasks=${numTasks}`,
{
stdio: 'inherit'
}
);
console.log(`${errorType} error test completed successfully`);
} catch (error) {
console.error(`${errorType} error test failed:`, error.message);
} finally {
// Clean up temporary files
if (fs.existsSync(tempScriptPath)) {
fs.unlinkSync(tempScriptPath);
}
if (fs.existsSync(testPRDPath)) {
fs.unlinkSync(testPRDPath);
}
}
}
// Function to run all error tests
async function runAllErrorTests() {
console.log('Starting error handling tests for callClaude function...');
// Test 1: Network error with automatic retry
await runErrorTest('network', 5, 2);
// Test 2: Timeout error with automatic retry
await runErrorTest('timeout', 5, 2);
// Test 3: Invalid JSON response with task reduction
await runErrorTest('invalid-json', 10, 2);
// Test 4: Empty tasks array with task reduction
await runErrorTest('empty-tasks', 15, 2);
// Test 5: Exhausted retries (more failures than MAX_RETRIES)
await runErrorTest('network', 5, 4);
console.log('\nAll error tests completed!');
console.log('Starting error handling tests for callClaude function...');
// Test 1: Network error with automatic retry
await runErrorTest('network', 5, 2);
// Test 2: Timeout error with automatic retry
await runErrorTest('timeout', 5, 2);
// Test 3: Invalid JSON response with task reduction
await runErrorTest('invalid-json', 10, 2);
// Test 4: Empty tasks array with task reduction
await runErrorTest('empty-tasks', 15, 2);
// Test 5: Exhausted retries (more failures than MAX_RETRIES)
await runErrorTest('network', 5, 4);
console.log('\nAll error tests completed!');
}
// Run the tests
runAllErrorTests().catch(error => {
console.error('Error running tests:', error);
process.exit(1);
});
runAllErrorTests().catch((error) => {
console.error('Error running tests:', error);
process.exit(1);
});

View File

@@ -2,7 +2,7 @@
/**
* test-claude.js
*
*
* A simple test script to verify the improvements to the callClaude function.
* This script tests different scenarios:
* 1. Normal operation with a small PRD
@@ -24,11 +24,11 @@ dotenv.config();
// Create a simple PRD for testing
const createTestPRD = (size = 'small', taskComplexity = 'simple') => {
let content = `# Test PRD - ${size.toUpperCase()} SIZE, ${taskComplexity.toUpperCase()} COMPLEXITY\n\n`;
// Add more content based on size
if (size === 'small') {
content += `
let content = `# Test PRD - ${size.toUpperCase()} SIZE, ${taskComplexity.toUpperCase()} COMPLEXITY\n\n`;
// Add more content based on size
if (size === 'small') {
content += `
## Overview
This is a small test PRD to verify the callClaude function improvements.
@@ -44,9 +44,9 @@ This is a small test PRD to verify the callClaude function improvements.
- Backend: Node.js
- Database: MongoDB
`;
} else if (size === 'medium') {
// Medium-sized PRD with more requirements
content += `
} else if (size === 'medium') {
// Medium-sized PRD with more requirements
content += `
## Overview
This is a medium-sized test PRD to verify the callClaude function improvements.
@@ -76,20 +76,20 @@ This is a medium-sized test PRD to verify the callClaude function improvements.
- CI/CD: GitHub Actions
- Monitoring: Prometheus and Grafana
`;
} else if (size === 'large') {
// Large PRD with many requirements
content += `
} else if (size === 'large') {
// Large PRD with many requirements
content += `
## Overview
This is a large test PRD to verify the callClaude function improvements.
## Requirements
`;
// Generate 30 requirements
for (let i = 1; i <= 30; i++) {
content += `${i}. Requirement ${i} - This is a detailed description of requirement ${i}.\n`;
}
content += `
// Generate 30 requirements
for (let i = 1; i <= 30; i++) {
content += `${i}. Requirement ${i} - This is a detailed description of requirement ${i}.\n`;
}
content += `
## Technical Stack
- Frontend: React with TypeScript
- Backend: Node.js with Express
@@ -101,12 +101,12 @@ This is a large test PRD to verify the callClaude function improvements.
## User Stories
`;
// Generate 20 user stories
for (let i = 1; i <= 20; i++) {
content += `- As a user, I want to be able to ${i} so that I can achieve benefit ${i}.\n`;
}
content += `
// Generate 20 user stories
for (let i = 1; i <= 20; i++) {
content += `- As a user, I want to be able to ${i} so that I can achieve benefit ${i}.\n`;
}
content += `
## Non-Functional Requirements
- Performance: The system should respond within 200ms
- Scalability: The system should handle 10,000 concurrent users
@@ -114,11 +114,11 @@ This is a large test PRD to verify the callClaude function improvements.
- Security: The system should comply with OWASP top 10
- Accessibility: The system should comply with WCAG 2.1 AA
`;
}
// Add complexity if needed
if (taskComplexity === 'complex') {
content += `
}
// Add complexity if needed
if (taskComplexity === 'complex') {
content += `
## Complex Requirements
- Implement a real-time collaboration system
- Add a machine learning-based recommendation engine
@@ -131,101 +131,110 @@ This is a large test PRD to verify the callClaude function improvements.
- Implement a custom reporting system
- Add a custom dashboard builder
`;
}
return content;
}
return content;
};
// Function to run the tests
async function runTests() {
console.log('Starting tests for callClaude function improvements...');
try {
// Instead of importing the callClaude function directly, we'll use the dev.js script
// with our test PRDs by running it as a child process
// Test 1: Small PRD, 5 tasks
console.log('\n=== Test 1: Small PRD, 5 tasks ===');
const smallPRD = createTestPRD('small', 'simple');
const smallPRDPath = path.join(__dirname, 'test-small-prd.txt');
fs.writeFileSync(smallPRDPath, smallPRD, 'utf8');
console.log(`Created test PRD at ${smallPRDPath}`);
console.log('Running dev.js with small PRD...');
// Use the child_process module to run the dev.js script
const { execSync } = await import('child_process');
try {
const smallResult = execSync(`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${smallPRDPath} --tasks=5`, {
stdio: 'inherit'
});
console.log('Small PRD test completed successfully');
} catch (error) {
console.error('Small PRD test failed:', error.message);
}
// Test 2: Medium PRD, 15 tasks
console.log('\n=== Test 2: Medium PRD, 15 tasks ===');
const mediumPRD = createTestPRD('medium', 'simple');
const mediumPRDPath = path.join(__dirname, 'test-medium-prd.txt');
fs.writeFileSync(mediumPRDPath, mediumPRD, 'utf8');
console.log(`Created test PRD at ${mediumPRDPath}`);
console.log('Running dev.js with medium PRD...');
try {
const mediumResult = execSync(`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${mediumPRDPath} --tasks=15`, {
stdio: 'inherit'
});
console.log('Medium PRD test completed successfully');
} catch (error) {
console.error('Medium PRD test failed:', error.message);
}
// Test 3: Large PRD, 25 tasks
console.log('\n=== Test 3: Large PRD, 25 tasks ===');
const largePRD = createTestPRD('large', 'complex');
const largePRDPath = path.join(__dirname, 'test-large-prd.txt');
fs.writeFileSync(largePRDPath, largePRD, 'utf8');
console.log(`Created test PRD at ${largePRDPath}`);
console.log('Running dev.js with large PRD...');
try {
const largeResult = execSync(`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${largePRDPath} --tasks=25`, {
stdio: 'inherit'
});
console.log('Large PRD test completed successfully');
} catch (error) {
console.error('Large PRD test failed:', error.message);
}
console.log('\nAll tests completed!');
} catch (error) {
console.error('Test failed:', error);
} finally {
// Clean up test files
console.log('\nCleaning up test files...');
const testFiles = [
path.join(__dirname, 'test-small-prd.txt'),
path.join(__dirname, 'test-medium-prd.txt'),
path.join(__dirname, 'test-large-prd.txt')
];
testFiles.forEach(file => {
if (fs.existsSync(file)) {
fs.unlinkSync(file);
console.log(`Deleted ${file}`);
}
});
console.log('Cleanup complete.');
}
console.log('Starting tests for callClaude function improvements...');
try {
// Instead of importing the callClaude function directly, we'll use the dev.js script
// with our test PRDs by running it as a child process
// Test 1: Small PRD, 5 tasks
console.log('\n=== Test 1: Small PRD, 5 tasks ===');
const smallPRD = createTestPRD('small', 'simple');
const smallPRDPath = path.join(__dirname, 'test-small-prd.txt');
fs.writeFileSync(smallPRDPath, smallPRD, 'utf8');
console.log(`Created test PRD at ${smallPRDPath}`);
console.log('Running dev.js with small PRD...');
// Use the child_process module to run the dev.js script
const { execSync } = await import('child_process');
try {
const smallResult = execSync(
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${smallPRDPath} --tasks=5`,
{
stdio: 'inherit'
}
);
console.log('Small PRD test completed successfully');
} catch (error) {
console.error('Small PRD test failed:', error.message);
}
// Test 2: Medium PRD, 15 tasks
console.log('\n=== Test 2: Medium PRD, 15 tasks ===');
const mediumPRD = createTestPRD('medium', 'simple');
const mediumPRDPath = path.join(__dirname, 'test-medium-prd.txt');
fs.writeFileSync(mediumPRDPath, mediumPRD, 'utf8');
console.log(`Created test PRD at ${mediumPRDPath}`);
console.log('Running dev.js with medium PRD...');
try {
const mediumResult = execSync(
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${mediumPRDPath} --tasks=15`,
{
stdio: 'inherit'
}
);
console.log('Medium PRD test completed successfully');
} catch (error) {
console.error('Medium PRD test failed:', error.message);
}
// Test 3: Large PRD, 25 tasks
console.log('\n=== Test 3: Large PRD, 25 tasks ===');
const largePRD = createTestPRD('large', 'complex');
const largePRDPath = path.join(__dirname, 'test-large-prd.txt');
fs.writeFileSync(largePRDPath, largePRD, 'utf8');
console.log(`Created test PRD at ${largePRDPath}`);
console.log('Running dev.js with large PRD...');
try {
const largeResult = execSync(
`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${largePRDPath} --tasks=25`,
{
stdio: 'inherit'
}
);
console.log('Large PRD test completed successfully');
} catch (error) {
console.error('Large PRD test failed:', error.message);
}
console.log('\nAll tests completed!');
} catch (error) {
console.error('Test failed:', error);
} finally {
// Clean up test files
console.log('\nCleaning up test files...');
const testFiles = [
path.join(__dirname, 'test-small-prd.txt'),
path.join(__dirname, 'test-medium-prd.txt'),
path.join(__dirname, 'test-large-prd.txt')
];
testFiles.forEach((file) => {
if (fs.existsSync(file)) {
fs.unlinkSync(file);
console.log(`Deleted ${file}`);
}
});
console.log('Cleanup complete.');
}
}
// Run the tests
runTests().catch(error => {
console.error('Error running tests:', error);
process.exit(1);
});
runTests().catch((error) => {
console.error('Error running tests:', error);
process.exit(1);
});

View File

@@ -1,4 +1,8 @@
import { checkForUpdate, displayUpgradeNotification, compareVersions } from './scripts/modules/commands.js';
import {
checkForUpdate,
displayUpgradeNotification,
compareVersions
} from './scripts/modules/commands.js';
import fs from 'fs';
import path from 'path';
@@ -7,63 +11,73 @@ process.env.FORCE_VERSION = '0.9.30';
// Create a mock package.json in memory for testing
const mockPackageJson = {
name: 'task-master-ai',
version: '0.9.30'
name: 'task-master-ai',
version: '0.9.30'
};
// Modified version of checkForUpdate that doesn't use HTTP for testing
async function testCheckForUpdate(simulatedLatestVersion) {
// Get current version - use our forced version
const currentVersion = process.env.FORCE_VERSION || '0.9.30';
console.log(`Using simulated current version: ${currentVersion}`);
console.log(`Using simulated latest version: ${simulatedLatestVersion}`);
// Compare versions
const needsUpdate = compareVersions(currentVersion, simulatedLatestVersion) < 0;
return {
currentVersion,
latestVersion: simulatedLatestVersion,
needsUpdate
};
// Get current version - use our forced version
const currentVersion = process.env.FORCE_VERSION || '0.9.30';
console.log(`Using simulated current version: ${currentVersion}`);
console.log(`Using simulated latest version: ${simulatedLatestVersion}`);
// Compare versions
const needsUpdate =
compareVersions(currentVersion, simulatedLatestVersion) < 0;
return {
currentVersion,
latestVersion: simulatedLatestVersion,
needsUpdate
};
}
// Test with current version older than latest (should show update notice)
async function runTest() {
console.log('=== Testing version check scenarios ===\n');
// Scenario 1: Update available
console.log('\n--- Scenario 1: Update available (Current: 0.9.30, Latest: 1.0.0) ---');
const updateInfo1 = await testCheckForUpdate('1.0.0');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo1.currentVersion}`);
console.log(`- Latest version: ${updateInfo1.latestVersion}`);
console.log(`- Update needed: ${updateInfo1.needsUpdate}`);
if (updateInfo1.needsUpdate) {
console.log('\nDisplaying upgrade notification:');
displayUpgradeNotification(updateInfo1.currentVersion, updateInfo1.latestVersion);
}
// Scenario 2: No update needed (versions equal)
console.log('\n--- Scenario 2: No update needed (Current: 0.9.30, Latest: 0.9.30) ---');
const updateInfo2 = await testCheckForUpdate('0.9.30');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo2.currentVersion}`);
console.log(`- Latest version: ${updateInfo2.latestVersion}`);
console.log(`- Update needed: ${updateInfo2.needsUpdate}`);
// Scenario 3: Development version (current newer than latest)
console.log('\n--- Scenario 3: Development version (Current: 0.9.30, Latest: 0.9.0) ---');
const updateInfo3 = await testCheckForUpdate('0.9.0');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo3.currentVersion}`);
console.log(`- Latest version: ${updateInfo3.latestVersion}`);
console.log(`- Update needed: ${updateInfo3.needsUpdate}`);
console.log('\n=== Test complete ===');
console.log('=== Testing version check scenarios ===\n');
// Scenario 1: Update available
console.log(
'\n--- Scenario 1: Update available (Current: 0.9.30, Latest: 1.0.0) ---'
);
const updateInfo1 = await testCheckForUpdate('1.0.0');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo1.currentVersion}`);
console.log(`- Latest version: ${updateInfo1.latestVersion}`);
console.log(`- Update needed: ${updateInfo1.needsUpdate}`);
if (updateInfo1.needsUpdate) {
console.log('\nDisplaying upgrade notification:');
displayUpgradeNotification(
updateInfo1.currentVersion,
updateInfo1.latestVersion
);
}
// Scenario 2: No update needed (versions equal)
console.log(
'\n--- Scenario 2: No update needed (Current: 0.9.30, Latest: 0.9.30) ---'
);
const updateInfo2 = await testCheckForUpdate('0.9.30');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo2.currentVersion}`);
console.log(`- Latest version: ${updateInfo2.latestVersion}`);
console.log(`- Update needed: ${updateInfo2.needsUpdate}`);
// Scenario 3: Development version (current newer than latest)
console.log(
'\n--- Scenario 3: Development version (Current: 0.9.30, Latest: 0.9.0) ---'
);
const updateInfo3 = await testCheckForUpdate('0.9.0');
console.log('Update check results:');
console.log(`- Current version: ${updateInfo3.currentVersion}`);
console.log(`- Latest version: ${updateInfo3.latestVersion}`);
console.log(`- Update needed: ${updateInfo3.needsUpdate}`);
console.log('\n=== Test complete ===');
}
// Run all tests
runTest();
runTest();

View File

@@ -1,4 +1,7 @@
import { displayUpgradeNotification, compareVersions } from './scripts/modules/commands.js';
import {
displayUpgradeNotification,
compareVersions
} from './scripts/modules/commands.js';
// Simulate different version scenarios
console.log('=== Simulating version check ===\n');
@@ -8,15 +11,25 @@ console.log('Scenario 1: Current version older than latest');
displayUpgradeNotification('0.9.30', '1.0.0');
// 2. Current version same as latest (no update needed)
console.log('\nScenario 2: Current version same as latest (this would not normally show a notice)');
console.log(
'\nScenario 2: Current version same as latest (this would not normally show a notice)'
);
console.log('Current: 1.0.0, Latest: 1.0.0');
console.log('compareVersions result:', compareVersions('1.0.0', '1.0.0'));
console.log('Update needed:', compareVersions('1.0.0', '1.0.0') < 0 ? 'Yes' : 'No');
console.log(
'Update needed:',
compareVersions('1.0.0', '1.0.0') < 0 ? 'Yes' : 'No'
);
// 3. Current version newer than latest (e.g., development version, would not show notice)
console.log('\nScenario 3: Current version newer than latest (this would not normally show a notice)');
console.log(
'\nScenario 3: Current version newer than latest (this would not normally show a notice)'
);
console.log('Current: 1.1.0, Latest: 1.0.0');
console.log('compareVersions result:', compareVersions('1.1.0', '1.0.0'));
console.log('Update needed:', compareVersions('1.1.0', '1.0.0') < 0 ? 'Yes' : 'No');
console.log(
'Update needed:',
compareVersions('1.1.0', '1.0.0') < 0 ? 'Yes' : 'No'
);
console.log('\n=== Test complete ===');
console.log('\n=== Test complete ===');

View File

@@ -60,4 +60,4 @@ We aim for at least 80% test coverage for all code paths. Coverage reports can b
```bash
npm run test:coverage
```
```

View File

@@ -1,14 +1,14 @@
{
"tasks": [
{
"id": 1,
"dependencies": [],
"subtasks": [
{
"id": 1,
"dependencies": []
}
]
}
]
}
"tasks": [
{
"id": 1,
"dependencies": [],
"subtasks": [
{
"id": 1,
"dependencies": []
}
]
}
]
}

View File

@@ -3,42 +3,50 @@
*/
export const sampleClaudeResponse = {
tasks: [
{
id: 1,
title: "Setup Task Data Structure",
description: "Implement the core task data structure and file operations",
status: "pending",
dependencies: [],
priority: "high",
details: "Create the tasks.json file structure with support for task properties including ID, title, description, status, dependencies, priority, details, and test strategy. Implement file system operations for reading and writing task data.",
testStrategy: "Verify tasks.json is created with the correct structure and that task data can be read from and written to the file."
},
{
id: 2,
title: "Implement CLI Foundation",
description: "Create the command-line interface foundation with basic commands",
status: "pending",
dependencies: [1],
priority: "high",
details: "Set up Commander.js for handling CLI commands. Implement the basic command structure including help documentation. Create the foundational command parsing logic.",
testStrategy: "Test each command to ensure it properly parses arguments and options. Verify help documentation is displayed correctly."
},
{
id: 3,
title: "Develop Task Management Operations",
description: "Implement core operations for creating, reading, updating, and deleting tasks",
status: "pending",
dependencies: [1],
priority: "medium",
details: "Implement functions for listing tasks, adding new tasks, updating task status, and removing tasks. Include support for filtering tasks by status and other properties.",
testStrategy: "Create unit tests for each CRUD operation to verify they correctly modify the task data."
}
],
metadata: {
projectName: "Task Management CLI",
totalTasks: 3,
sourceFile: "tests/fixtures/sample-prd.txt",
generatedAt: "2023-12-15"
}
};
tasks: [
{
id: 1,
title: 'Setup Task Data Structure',
description: 'Implement the core task data structure and file operations',
status: 'pending',
dependencies: [],
priority: 'high',
details:
'Create the tasks.json file structure with support for task properties including ID, title, description, status, dependencies, priority, details, and test strategy. Implement file system operations for reading and writing task data.',
testStrategy:
'Verify tasks.json is created with the correct structure and that task data can be read from and written to the file.'
},
{
id: 2,
title: 'Implement CLI Foundation',
description:
'Create the command-line interface foundation with basic commands',
status: 'pending',
dependencies: [1],
priority: 'high',
details:
'Set up Commander.js for handling CLI commands. Implement the basic command structure including help documentation. Create the foundational command parsing logic.',
testStrategy:
'Test each command to ensure it properly parses arguments and options. Verify help documentation is displayed correctly.'
},
{
id: 3,
title: 'Develop Task Management Operations',
description:
'Implement core operations for creating, reading, updating, and deleting tasks',
status: 'pending',
dependencies: [1],
priority: 'medium',
details:
'Implement functions for listing tasks, adding new tasks, updating task status, and removing tasks. Include support for filtering tasks by status and other properties.',
testStrategy:
'Create unit tests for each CRUD operation to verify they correctly modify the task data.'
}
],
metadata: {
projectName: 'Task Management CLI',
totalTasks: 3,
sourceFile: 'tests/fixtures/sample-prd.txt',
generatedAt: '2023-12-15'
}
};

Some files were not shown because too many files have changed in this diff.