Compare commits
3 commits: v0.20.0...crunchyman

| Author | SHA1 | Date |
| --- | --- | --- |
|  | 17ef607c41 |  |
|  | d95aaf5316 |  |
|  | 8a3841e195 |  |
@@ -1,20 +1,18 @@
 {
 	"mcpServers": {
 		"taskmaster-ai": {
 			"command": "node",
-			"args": [
-				"./mcp-server/server.js"
-			],
+			"args": ["./mcp-server/server.js"],
 			"env": {
 				"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
 				"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
 				"MODEL": "claude-3-7-sonnet-20250219",
 				"PERPLEXITY_MODEL": "sonar-pro",
 				"MAX_TOKENS": 128000,
 				"TEMPERATURE": 0.2,
 				"DEFAULT_SUBTASKS": 5,
 				"DEFAULT_PRIORITY": "medium"
 			}
 		}
 	}
 }
60  .github/workflows/ci.yml (vendored)
@@ -14,7 +14,7 @@ permissions:
   contents: read
 
 jobs:
-  build:
+  setup:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
@@ -24,21 +24,55 @@ jobs:
       - uses: actions/setup-node@v4
         with:
           node-version: 20
-          cache: "npm"
+          cache: 'npm'
+
+      - name: Install Dependencies
+        id: install
+        run: npm ci
+        timeout-minutes: 2
 
       - name: Cache node_modules
         uses: actions/cache@v4
         with:
-          path: |
-            node_modules
-            */*/node_modules
-          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
-          restore-keys: |
-            ${{ runner.os }}-node-
-
-      - name: Install Dependencies
-        run: npm ci
-        timeout-minutes: 2
+          path: node_modules
+          key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
+
+  format-check:
+    needs: setup
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 20
+
+      - name: Restore node_modules
+        uses: actions/cache@v4
+        with:
+          path: node_modules
+          key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
+
+      - name: Format Check
+        run: npm run format-check
+        env:
+          FORCE_COLOR: 1
+
+  test:
+    needs: setup
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 20
+
+      - name: Restore node_modules
+        uses: actions/cache@v4
+        with:
+          path: node_modules
+          key: ${{ runner.os }}-node-modules-${{ hashFiles('**/package-lock.json') }}
 
       - name: Run Tests
         run: |

@@ -47,13 +81,13 @@ jobs:
           NODE_ENV: test
           CI: true
           FORCE_COLOR: 1
-        timeout-minutes: 15
+        timeout-minutes: 10
 
       - name: Upload Test Results
         if: always()
         uses: actions/upload-artifact@v4
         with:
-          name: test-results-node
+          name: test-results
           path: |
             test-results
             coverage
2  .github/workflows/release.yml (vendored)
@@ -14,7 +14,7 @@ jobs:
       - uses: actions/setup-node@v4
         with:
           node-version: 20
-          cache: "npm"
+          cache: 'npm'
 
       - name: Cache node_modules
         uses: actions/cache@v4
6  .prettierignore (new file)
@@ -0,0 +1,6 @@
+# Ignore artifacts:
+build
+coverage
+.changeset
+tasks
+package-lock.json
11  .prettierrc (new file)
@@ -0,0 +1,11 @@
+{
+	"printWidth": 80,
+	"tabWidth": 2,
+	"useTabs": true,
+	"semi": true,
+	"singleQuote": true,
+	"trailingComma": "none",
+	"bracketSpacing": true,
+	"arrowParens": "always",
+	"endOfLine": "lf"
+}
@@ -58,6 +58,7 @@ This will prompt you for project details and set up a new project with the neces
 ### Important Notes
 
 1. **ES Modules Configuration:**
+
    - This project uses ES Modules (ESM) instead of CommonJS.
    - This is set via `"type": "module"` in your package.json.
    - Use `import/export` syntax instead of `require()`.
32  README.md
@@ -26,22 +26,22 @@ MCP (Model Control Protocol) provides the easiest way to get started with Task M
(whitespace-only change)
 
 ```json
 {
 	"mcpServers": {
 		"taskmaster-ai": {
 			"command": "npx",
 			"args": ["-y", "task-master-ai", "mcp-server"],
 			"env": {
 				"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
 				"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
 				"MODEL": "claude-3-7-sonnet-20250219",
 				"PERPLEXITY_MODEL": "sonar-pro",
 				"MAX_TOKENS": 128000,
 				"TEMPERATURE": 0.2,
 				"DEFAULT_SUBTASKS": 5,
 				"DEFAULT_PRIORITY": "medium"
 			}
 		}
 	}
 }
 ```
 
@@ -21,9 +21,11 @@ In an AI-driven development process—particularly with tools like [Cursor](http
 The script can be configured through environment variables in a `.env` file at the root of the project:
 
 ### Required Configuration
+
 - `ANTHROPIC_API_KEY`: Your Anthropic API key for Claude
 
 ### Optional Configuration
+
 - `MODEL`: Specify which Claude model to use (default: "claude-3-7-sonnet-20250219")
 - `MAX_TOKENS`: Maximum tokens for model responses (default: 4000)
 - `TEMPERATURE`: Temperature for model responses (default: 0.7)

@@ -38,9 +40,10 @@ The script can be configured through environment variables in a `.env` file at t
 
 ## How It Works
 
 1. **`tasks.json`**:
+
    - A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
    - The `meta` field can store additional info like the project's name, version, or reference to the PRD.
    - Tasks can have `subtasks` for more detailed implementation steps.
    - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.
 

@@ -50,7 +53,7 @@ The script can be configured through environment variables in a `.env` file at t
(whitespace-only change)
 ```bash
 # If installed globally
 task-master [command] [options]
 
 # If using locally within the project
 node scripts/dev.js [command] [options]
 ```

@@ -111,6 +114,7 @@ task-master update --file=custom-tasks.json --from=5 --prompt="Change database f
 ```
 
 Notes:
+
 - The `--prompt` parameter is required and should explain the changes or new context
 - Only tasks that aren't marked as 'done' will be updated
 - Tasks with ID >= the specified --from value will be updated

@@ -134,6 +138,7 @@ task-master set-status --id=1,2,3 --status=done
 ```
 
 Notes:
+
 - When marking a parent task as "done", all of its subtasks will automatically be marked as "done" as well
 - Common status values are 'done', 'pending', and 'deferred', but any string is accepted
 - You can specify multiple task IDs by separating them with commas

@@ -183,6 +188,7 @@ task-master clear-subtasks --all
 ```
 
 Notes:
+
 - After clearing subtasks, task files are automatically regenerated
 - This is useful when you want to regenerate subtasks with a different approach
 - Can be combined with the `expand` command to immediately generate new subtasks

@@ -198,6 +204,7 @@ The script integrates with two AI services:
 The Perplexity integration uses the OpenAI client to connect to Perplexity's API, which provides enhanced research capabilities for generating more informed subtasks. If the Perplexity API is unavailable or encounters an error, the script will automatically fall back to using Anthropic's Claude.
 
 To use the Perplexity integration:
+
 1. Obtain a Perplexity API key
 2. Add `PERPLEXITY_API_KEY` to your `.env` file
 3. Optionally specify `PERPLEXITY_MODEL` in your `.env` file (default: "sonar-medium-online")

@@ -206,6 +213,7 @@ To use the Perplexity integration:
 ## Logging
 
 The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:
+
 - `debug`: Detailed information, typically useful for troubleshooting
 - `info`: Confirmation that things are working as expected (default)
 - `warn`: Warning messages that don't prevent execution

@@ -228,17 +236,20 @@ task-master remove-dependency --id=<id> --depends-on=<id>
 These commands:
 
 1. **Allow precise dependency management**:
+
    - Add dependencies between tasks with automatic validation
    - Remove dependencies when they're no longer needed
    - Update task files automatically after changes
 
 2. **Include validation checks**:
+
    - Prevent circular dependencies (a task depending on itself)
    - Prevent duplicate dependencies
    - Verify that both tasks exist before adding/removing dependencies
    - Check if dependencies exist before attempting to remove them
 
 3. **Provide clear feedback**:
+
    - Success messages confirm when dependencies are added/removed
    - Error messages explain why operations failed (if applicable)
 

@@ -263,6 +274,7 @@ task-master validate-dependencies --file=custom-tasks.json
 ```
 
 This command:
+
 - Scans all tasks and subtasks for non-existent dependencies
 - Identifies potential self-dependencies (tasks referencing themselves)
 - Reports all found issues without modifying files

@@ -284,6 +296,7 @@ task-master fix-dependencies --file=custom-tasks.json
 ```
 
 This command:
+
 1. **Validates all dependencies** across tasks and subtasks
 2. **Automatically removes**:
    - References to non-existent tasks and subtasks

@@ -321,6 +334,7 @@ task-master analyze-complexity --research
 ```
 
 Notes:
+
 - The command uses Claude to analyze each task's complexity (or Perplexity with --research flag)
 - Tasks are scored on a scale of 1-10
 - Each task receives a recommended number of subtasks based on DEFAULT_SUBTASKS configuration

@@ -345,33 +359,35 @@ task-master expand --id=8 --num=5 --prompt="Custom prompt"
 ```
 
 When a complexity report exists:
+
 - The `expand` command will use the recommended subtask count from the report (unless overridden)
 - It will use the tailored expansion prompt from the report (unless a custom prompt is provided)
 - When using `--all`, tasks are sorted by complexity score (highest first)
 - The `--research` flag is preserved from the complexity analysis to expansion
+
 The output report structure is:
 
 ```json
 {
 	"meta": {
 		"generatedAt": "2023-06-15T12:34:56.789Z",
 		"tasksAnalyzed": 20,
 		"thresholdScore": 5,
 		"projectName": "Your Project Name",
 		"usedResearch": true
 	},
 	"complexityAnalysis": [
 		{
 			"taskId": 8,
 			"taskTitle": "Develop Implementation Drift Handling",
 			"complexityScore": 9.5,
 			"recommendedSubtasks": 6,
 			"expansionPrompt": "Create subtasks that handle detecting...",
 			"reasoning": "This task requires sophisticated logic...",
 			"expansionCommand": "task-master expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
-		},
+		}
 		// More tasks sorted by complexity score (highest first)
 	]
 }
 ```
 

@@ -438,4 +454,4 @@ This command:
(whitespace-only change)
 - Commands for working with subtasks
 - For subtasks, provides a link to view the parent task
 
 This command is particularly useful when you need to examine a specific task in detail before implementing it or when you want to check the status and details of a particular task.
@@ -20,11 +20,11 @@ const args = process.argv.slice(2);
(whitespace-only change)
 
 // Spawn the init script with all arguments
 const child = spawn('node', [initScriptPath, ...args], {
 	stdio: 'inherit',
 	cwd: process.cwd()
 });
 
 // Handle exit
 child.on('close', (code) => {
 	process.exit(code);
 });
@@ -44,30 +44,36 @@ const initScriptPath = resolve(__dirname, '../scripts/init.js');
(Formatting-only change: inside the `runDevScript(args)` helper, the long debug call `console.error('- dev.js will receive: node ' + devScriptPath + ' ' + args.join(' ') + '\n')` is re-wrapped across several lines; the TEST_MODE early-exit block, the `spawn('node', [devScriptPath, ...args], { stdio: 'inherit', cwd: process.cwd() })` call, and the `child.on('close', ...)` exit handling are unchanged.)

@@ -79,228 +85,239 @@ const toKebabCase = (str) => str.replace(/([A-Z])/g, '-$1').toLowerCase();
(Formatting-only change: the body of `createDevScriptAction(commandName)` — camelCase-flag detection with the kebab-case error message, collection of positional arguments and explicit flags from `process.argv`, the `numTasks` → `num-tasks` special case, the `generate: false` → `--skip-generate` mapping, the `-p` parent option, the DEBUG output, and the final `runDevScript(args)` call — is re-wrapped by Prettier with no logic changes. The same applies to `registerInitCommand(program)` (the init option-name array is split one element per line), to the `program` setup (`.name('task-master')`, `.description('Claude Task Master CLI')`, `.version(version)`, custom help handling), and to the `tempProgram.commands.forEach((cmd) => ...)` loop, where the `newCmd` creation and option copying are condensed onto fewer lines.)

@@ -308,47 +325,56 @@ program.parse(process.argv);
(Formatting-only change: in the `process.on('uncaughtException', ...)` handler, the `commandArg` lookup and the long `console.error(chalk.yellow(...))` messages for `commander.unknownOption` and `commander.unknownCommand` are re-wrapped across multiple lines; the fallback error handling, the show-help-when-no-arguments path, and the `module.exports = { detectCamelCaseFlags }` block are unchanged.)
@@ -41,39 +41,39 @@ Core functions should follow this pattern to support both CLI and MCP use:
(Indentation-only change to the `exampleFunction(param1, param2, options = {})` example: skip `displayBanner()` and console output when `options.source === 'mcp'`, run the core logic, return `{ success: true, data: result }` for MCP or print a success message for CLI, and on error return `{ success: false, error: error.message }` for MCP or log and `process.exit(1)` for CLI.)

@@ -89,17 +89,17 @@ export const simpleFunction = adaptForMcp(originalFunction);
(In the `sourceSplitFunction(...)` example, `function(param1, param2)` and `function(param1, param2, options = {})` become `function (param1, param2)` and `function (param1, param2, options = {})`; the CLI and MCP implementations are otherwise unchanged.)

@@ -110,7 +110,7 @@ When adding new features, follow these steps to ensure CLI and MCP compatibility
(Whitespace-only change to the six-step checklist: implement core logic, add source parameter support, add to task-master-core.js, update the command map in `mcp-server/src/tools/utils.js`, create the tool in `mcp-server/src/tools/`, and register it in `mcp-server/src/tools/index.js`.)

@@ -119,39 +119,39 @@ When adding new features, follow these steps to ensure CLI and MCP compatibility
(Indentation-only change to the `newFeature(param1, param2, options = {})` example in `scripts/modules/task-manager.js`, which follows the same source-aware UI, return, and error-handling pattern as above.)

@@ -163,12 +163,12 @@ import { newFeature } from '../../../scripts/modules/task-manager.js';
(Indentation-only change to the export example that adds `async newFeature(args = {}, options = {})` delegating to `executeFunction(newFeature, [param1, param2], options)`.)

@@ -177,8 +177,8 @@ export default {
(Indentation-only change to the `commandMap` example in `mcp-server/src/tools/utils.js` that maps `'new-feature': 'newFeature'`.)

@@ -186,53 +186,53 @@ const commandMap = {
(In the `mcp-server/src/tools/newFeature.js` example, double quotes become single quotes — `import { z } from 'zod'`, the `'newFeature'` tool name, the `'new-feature'` command, and the zod `.describe()` strings — and the trailing commas after `createErrorResponse` in the import list and after the `execute` handler are dropped; the `registerNewFeatureTool(server)` registration, the `cmdArgs` construction, the `executeTaskMasterCommand('new-feature', log, cmdArgs, projectRoot)` call, and the `createContentResponse`/`createErrorResponse` handling are unchanged.)

@@ -240,11 +240,11 @@ export function registerNewFeatureTool(server) {
(`import { registerNewFeatureTool } from "./newFeature.js"` switches to single quotes; the `registerTaskMasterTools(server)` registration example is otherwise unchanged.)

@@ -266,4 +266,4 @@ node mcp-server/tests/test-command.js newFeature
(Whitespace-only change to the closing best-practice list: structured data for MCP, consistent error handling, documentation updates, and testing both CLI and MCP interfaces for any new or modified feature.)
File diff suppressed because it is too large
@@ -6,57 +6,55 @@ This document provides examples of how to use the new AI client utilities with A

```javascript
// In your direct function implementation:
import {
	getAnthropicClientForMCP,
	getModelConfig,
	handleClaudeError
} from '../utils/ai-client-utils.js';

export async function someAiOperationDirect(args, log, context) {
	try {
		// Initialize Anthropic client with session from context
		const client = getAnthropicClientForMCP(context.session, log);

		// Get model configuration with defaults or session overrides
		const modelConfig = getModelConfig(context.session);

		// Make API call with proper error handling
		try {
			const response = await client.messages.create({
				model: modelConfig.model,
				max_tokens: modelConfig.maxTokens,
				temperature: modelConfig.temperature,
				messages: [{ role: 'user', content: 'Your prompt here' }]
			});

			return {
				success: true,
				data: response
			};
		} catch (apiError) {
			// Use helper to get user-friendly error message
			const friendlyMessage = handleClaudeError(apiError);

			return {
				success: false,
				error: {
					code: 'AI_API_ERROR',
					message: friendlyMessage
				}
			};
		}
	} catch (error) {
		// Handle client initialization errors
		return {
			success: false,
			error: {
				code: 'AI_CLIENT_ERROR',
				message: error.message
			}
		};
	}
}
```

@@ -64,86 +62,85 @@ export async function someAiOperationDirect(args, log, context) {

```javascript
// In your MCP tool implementation:
import {
	AsyncOperationManager,
	StatusCodes
} from '../../utils/async-operation-manager.js';
import { someAiOperationDirect } from '../../core/direct-functions/some-ai-operation.js';

export async function someAiOperation(args, context) {
	const { session, mcpLog } = context;
	const log = mcpLog || console;

	try {
		// Create operation description
		const operationDescription = `AI operation: ${args.someParam}`;

		// Start async operation
		const operation = AsyncOperationManager.createOperation(
			operationDescription,
			async (reportProgress) => {
				try {
					// Initial progress report
					reportProgress({
						progress: 0,
						status: 'Starting AI operation...'
					});

					// Call direct function with session and progress reporting
					const result = await someAiOperationDirect(args, log, {
						reportProgress,
						mcpLog: log,
						session
					});

					// Final progress update
					reportProgress({
						progress: 100,
						status: result.success ? 'Operation completed' : 'Operation failed',
						result: result.data,
						error: result.error
					});

					return result;
				} catch (error) {
					// Handle errors in the operation
					reportProgress({
						progress: 100,
						status: 'Operation failed',
						error: {
							message: error.message,
							code: error.code || 'OPERATION_FAILED'
						}
					});
					throw error;
				}
			}
		);

		// Return immediate response with operation ID
		return {
			status: StatusCodes.ACCEPTED,
			body: {
				success: true,
				message: 'Operation started',
				operationId: operation.id
			}
		};
	} catch (error) {
		// Handle errors in the MCP tool
		log.error(`Error in someAiOperation: ${error.message}`);
		return {
			status: StatusCodes.INTERNAL_SERVER_ERROR,
			body: {
				success: false,
				error: {
					code: 'OPERATION_FAILED',
					message: error.message
				}
			}
		};
	}
}
```

@@ -151,58 +148,56 @@ export async function someAiOperation(args, context) {

```javascript
// In your direct function:
import {
	getPerplexityClientForMCP,
	getBestAvailableAIModel
} from '../utils/ai-client-utils.js';

export async function researchOperationDirect(args, log, context) {
	try {
		// Get the best AI model for this operation based on needs
		const { type, client } = await getBestAvailableAIModel(
			context.session,
			{ requiresResearch: true },
			log
		);

		// Report which model we're using
		if (context.reportProgress) {
			await context.reportProgress({
				progress: 10,
				status: `Using ${type} model for research...`
			});
		}

		// Make API call based on the model type
		if (type === 'perplexity') {
			// Call Perplexity
			const response = await client.chat.completions.create({
				model: context.session?.env?.PERPLEXITY_MODEL || 'sonar-medium-online',
				messages: [{ role: 'user', content: args.researchQuery }],
				temperature: 0.1
			});

			return {
				success: true,
				data: response.choices[0].message.content
			};
		} else {
			// Call Claude as fallback
			// (Implementation depends on specific needs)
			// ...
		}
	} catch (error) {
		// Handle errors
		return {
			success: false,
			error: {
				code: 'RESEARCH_ERROR',
				message: error.message
			}
		};
	}
}
```

@@ -214,9 +209,9 @@ import { getModelConfig } from '../utils/ai-client-utils.js';

// Using custom defaults for a specific operation
const operationDefaults = {
	model: 'claude-3-haiku-20240307', // Faster, smaller model
	maxTokens: 1000, // Lower token limit
	temperature: 0.2 // Lower temperature for more deterministic output
};

// Get model config with operation-specific defaults

@@ -224,30 +219,34 @@ const modelConfig = getModelConfig(context.session, operationDefaults);

// Now use modelConfig in your API calls
const response = await client.messages.create({
	model: modelConfig.model,
	max_tokens: modelConfig.maxTokens,
	temperature: modelConfig.temperature
	// Other parameters...
});
```

## Best Practices

1. **Error Handling**:

   - Always use try/catch blocks around both client initialization and API calls
   - Use `handleClaudeError` to provide user-friendly error messages
   - Return standardized error objects with code and message

2. **Progress Reporting**:

   - Report progress at key points (starting, processing, completing)
   - Include meaningful status messages
   - Include error details in progress reports when failures occur

3. **Session Handling**:

   - Always pass the session from the context to the AI client getters
   - Use `getModelConfig` to respect user settings from session

4. **Model Selection**:

   - Use `getBestAvailableAIModel` when you need to select between different models
   - Set `requiresResearch: true` when you need Perplexity capabilities

@@ -255,4 +254,4 @@ const response = await client.messages.create({

   - Create descriptive operation names
   - Handle all errors within the operation function
   - Return standardized results from direct functions
   - Return immediate responses with operation IDs
@@ -14,22 +14,22 @@ MCP (Model Control Protocol) provides the easiest way to get started with Task M

```json
{
	"mcpServers": {
		"taskmaster-ai": {
			"command": "npx",
			"args": ["-y", "task-master-ai", "mcp-server"],
			"env": {
				"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
				"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
				"MODEL": "claude-3-7-sonnet-20250219",
				"PERPLEXITY_MODEL": "sonar-pro",
				"MAX_TOKENS": 128000,
				"TEMPERATURE": 0.2,
				"DEFAULT_SUBTASKS": 5,
				"DEFAULT_PRIORITY": "medium"
			}
		}
	}
}
```
41 entries.json

@@ -1,41 +0,0 @@

import os
import json

# Path to Cursor's history folder
history_path = os.path.expanduser('~/Library/Application Support/Cursor/User/History')

# File to search for
target_file = 'tasks/tasks.json'

# Function to search through all entries.json files
def search_entries_for_file(history_path, target_file):
    matching_folders = []
    for folder in os.listdir(history_path):
        folder_path = os.path.join(history_path, folder)
        if not os.path.isdir(folder_path):
            continue

        # Look for entries.json
        entries_file = os.path.join(folder_path, 'entries.json')
        if not os.path.exists(entries_file):
            continue

        # Parse entries.json to find the resource key
        with open(entries_file, 'r') as f:
            data = json.load(f)
            resource = data.get('resource', None)
            if resource and target_file in resource:
                matching_folders.append(folder_path)

    return matching_folders

# Search for the target file
matching_folders = search_entries_for_file(history_path, target_file)

# Output the matching folders
if matching_folders:
    print(f"Found {target_file} in the following folders:")
    for folder in matching_folders:
        print(folder)
else:
    print(f"No matches found for {target_file}.")
190 index.js

@@ -41,27 +41,27 @@ export const devScriptPath = resolve(__dirname, './scripts/dev.js');

// Export a function to initialize a new project programmatically
export const initProject = async (options = {}) => {
	const init = await import('./scripts/init.js');
	return init.initializeProject(options);
};

// Export a function to run init as a CLI command
export const runInitCLI = async () => {
	// Using spawn to ensure proper handling of stdio and process exit
	const child = spawn('node', [resolve(__dirname, './scripts/init.js')], {
		stdio: 'inherit',
		cwd: process.cwd()
	});

	return new Promise((resolve, reject) => {
		child.on('close', (code) => {
			if (code === 0) {
				resolve();
			} else {
				reject(new Error(`Init script exited with code ${code}`));
			}
		});
	});
};

// Export version information

@@ -69,81 +69,81 @@ export const version = packageJson.version;

// CLI implementation
if (import.meta.url === `file://${process.argv[1]}`) {
	const program = new Command();

	program
		.name('task-master')
		.description('Claude Task Master CLI')
		.version(version);

	program
		.command('init')
		.description('Initialize a new project')
		.action(() => {
			runInitCLI().catch((err) => {
				console.error('Init failed:', err.message);
				process.exit(1);
			});
		});

	program
		.command('dev')
		.description('Run the dev.js script')
		.allowUnknownOption(true)
		.action(() => {
			const args = process.argv.slice(process.argv.indexOf('dev') + 1);
			const child = spawn('node', [devScriptPath, ...args], {
				stdio: 'inherit',
				cwd: process.cwd()
			});

			child.on('close', (code) => {
				process.exit(code);
			});
		});

	// Add shortcuts for common dev.js commands
	program
		.command('list')
		.description('List all tasks')
		.action(() => {
			const child = spawn('node', [devScriptPath, 'list'], {
				stdio: 'inherit',
				cwd: process.cwd()
			});

			child.on('close', (code) => {
				process.exit(code);
			});
		});

	program
		.command('next')
		.description('Show the next task to work on')
		.action(() => {
			const child = spawn('node', [devScriptPath, 'next'], {
				stdio: 'inherit',
				cwd: process.cwd()
			});

			child.on('close', (code) => {
				process.exit(code);
			});
		});

	program
		.command('generate')
		.description('Generate task files')
		.action(() => {
			const child = spawn('node', [devScriptPath, 'generate'], {
				stdio: 'inherit',
				cwd: process.cwd()
			});

			child.on('close', (code) => {
				process.exit(code);
			});
		});

	program.parse(process.argv);
}
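Beyond the CLI commands above, the module also exports `initProject` and `version` for programmatic use. A minimal sketch follows, under two assumptions: the import specifier is the published package name that appears in the MCP config (`task-master-ai`), and the options object shape is a placeholder, since it is defined in `scripts/init.js`, which is not part of this diff.

```javascript
// Hedged sketch: programmatic initialization using the exports shown above.
// The option fields are hypothetical; only initProject and version are defined here.
import { initProject, version } from 'task-master-ai';

console.log(`task-master version: ${version}`);

await initProject({
	projectName: 'demo-project' // placeholder option, not taken from this diff
});
```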
110 jest.config.js

@@ -1,56 +1,56 @@

export default {
	// Use Node.js environment for testing
	testEnvironment: 'node',

	// Automatically clear mock calls between every test
	clearMocks: true,

	// Indicates whether the coverage information should be collected while executing the test
	collectCoverage: false,

	// The directory where Jest should output its coverage files
	coverageDirectory: 'coverage',

	// A list of paths to directories that Jest should use to search for files in
	roots: ['<rootDir>/tests'],

	// The glob patterns Jest uses to detect test files
	testMatch: [
		'**/__tests__/**/*.js',
		'**/?(*.)+(spec|test).js',
		'**/tests/*.test.js'
	],

	// Transform files
	transform: {},

	// Disable transformations for node_modules
	transformIgnorePatterns: ['/node_modules/'],

	// Set moduleNameMapper for absolute paths
	moduleNameMapper: {
		'^@/(.*)$': '<rootDir>/$1'
	},

	// Setup module aliases
	moduleDirectories: ['node_modules', '<rootDir>'],

	// Configure test coverage thresholds
	coverageThreshold: {
		global: {
			branches: 80,
			functions: 80,
			lines: 80,
			statements: 80
		}
	},

	// Generate coverage report in these formats
	coverageReporters: ['text', 'lcov'],

	// Verbose output
	verbose: true,

	// Setup file
	setupFilesAfterEnv: ['<rootDir>/tests/setup.js']
};
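For reference, a minimal sketch of a test file this configuration would pick up; the file path and assertions are illustrative, while the `**/tests/*.test.js` glob and the Node environment come from the config above.

```javascript
// tests/example.test.js — hypothetical name; matches the '**/tests/*.test.js' glob
// and runs as plain ESM in the Node environment (transform: {}).
import { jest } from '@jest/globals';

describe('jest wiring', () => {
	it('runs an ESM test without a transform step', () => {
		const increment = jest.fn((n) => n + 1);
		expect(increment(1)).toBe(2);
		expect(increment).toHaveBeenCalledTimes(1);
	});
});
```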
@@ -1,8 +1,8 @@

#!/usr/bin/env node

import TaskMasterMCPServer from './src/index.js';
import dotenv from 'dotenv';
import logger from './src/logger.js';

// Load environment variables
dotenv.config();

@@ -11,25 +11,25 @@ dotenv.config();

 * Start the MCP server
 */
async function startServer() {
	const server = new TaskMasterMCPServer();

	// Handle graceful shutdown
	process.on('SIGINT', async () => {
		await server.stop();
		process.exit(0);
	});

	process.on('SIGTERM', async () => {
		await server.stop();
		process.exit(0);
	});

	try {
		await server.start();
	} catch (error) {
		logger.error(`Failed to start MCP server: ${error.message}`);
		process.exit(1);
	}
}

// Start the server
@@ -2,84 +2,90 @@ import { jest } from '@jest/globals';

import { ContextManager } from '../context-manager.js';

describe('ContextManager', () => {
	let contextManager;

	beforeEach(() => {
		contextManager = new ContextManager({
			maxCacheSize: 10,
			ttl: 1000, // 1 second for testing
			maxContextSize: 1000
		});
	});

	describe('getContext', () => {
		it('should create a new context when not in cache', async () => {
			const context = await contextManager.getContext('test-id', {
				test: true
			});
			expect(context.id).toBe('test-id');
			expect(context.metadata.test).toBe(true);
			expect(contextManager.stats.misses).toBe(1);
			expect(contextManager.stats.hits).toBe(0);
		});

		it('should return cached context when available', async () => {
			// First call creates the context
			await contextManager.getContext('test-id', { test: true });

			// Second call should hit cache
			const context = await contextManager.getContext('test-id', {
				test: true
			});
			expect(context.id).toBe('test-id');
			expect(context.metadata.test).toBe(true);
			expect(contextManager.stats.hits).toBe(1);
			expect(contextManager.stats.misses).toBe(1);
		});

		it('should respect TTL settings', async () => {
			// Create context
			await contextManager.getContext('test-id', { test: true });

			// Wait for TTL to expire
			await new Promise((resolve) => setTimeout(resolve, 1100));

			// Should create new context
			await contextManager.getContext('test-id', { test: true });
			expect(contextManager.stats.misses).toBe(2);
			expect(contextManager.stats.hits).toBe(0);
		});
	});

	describe('updateContext', () => {
		it('should update existing context metadata', async () => {
			await contextManager.getContext('test-id', { initial: true });
			const updated = await contextManager.updateContext('test-id', {
				updated: true
			});

			expect(updated.metadata.initial).toBe(true);
			expect(updated.metadata.updated).toBe(true);
		});
	});

	describe('invalidateContext', () => {
		it('should remove context from cache', async () => {
			await contextManager.getContext('test-id', { test: true });
			contextManager.invalidateContext('test-id', { test: true });

			// Should be a cache miss
			await contextManager.getContext('test-id', { test: true });
			expect(contextManager.stats.invalidations).toBe(1);
			expect(contextManager.stats.misses).toBe(2);
		});
	});

	describe('getStats', () => {
		it('should return current cache statistics', async () => {
			await contextManager.getContext('test-id', { test: true });
			const stats = contextManager.getStats();

			expect(stats.hits).toBe(0);
			expect(stats.misses).toBe(1);
			expect(stats.invalidations).toBe(0);
			expect(stats.size).toBe(1);
			expect(stats.maxSize).toBe(10);
			expect(stats.ttl).toBe(1000);
		});
	});
});
@@ -15,156 +15,157 @@ import { LRUCache } from 'lru-cache';

 */

export class ContextManager {
	/**
	 * Create a new ContextManager instance
	 * @param {ContextManagerConfig} config - Configuration options
	 */
	constructor(config = {}) {
		this.config = {
			maxCacheSize: config.maxCacheSize || 1000,
			ttl: config.ttl || 1000 * 60 * 5, // 5 minutes default
			maxContextSize: config.maxContextSize || 4000
		};

		// Initialize LRU cache for context data
		this.cache = new LRUCache({
			max: this.config.maxCacheSize,
			ttl: this.config.ttl,
			updateAgeOnGet: true
		});

		// Cache statistics
		this.stats = {
			hits: 0,
			misses: 0,
			invalidations: 0
		};
	}

	/**
	 * Create a new context or retrieve from cache
	 * @param {string} contextId - Unique identifier for the context
	 * @param {Object} metadata - Additional metadata for the context
	 * @returns {Object} Context object with metadata
	 */
	async getContext(contextId, metadata = {}) {
		const cacheKey = this._getCacheKey(contextId, metadata);

		// Try to get from cache first
		const cached = this.cache.get(cacheKey);
		if (cached) {
			this.stats.hits++;
			return cached;
		}

		this.stats.misses++;

		// Create new context if not in cache
		const context = {
			id: contextId,
			metadata: {
				...metadata,
				created: new Date().toISOString()
			}
		};

		// Cache the new context
		this.cache.set(cacheKey, context);

		return context;
	}

	/**
	 * Update an existing context
	 * @param {string} contextId - Context identifier
	 * @param {Object} updates - Updates to apply to the context
	 * @returns {Object} Updated context
	 */
	async updateContext(contextId, updates) {
		const context = await this.getContext(contextId);

		// Apply updates to context
		Object.assign(context.metadata, updates);

		// Update cache
		const cacheKey = this._getCacheKey(contextId, context.metadata);
		this.cache.set(cacheKey, context);

		return context;
	}

	/**
	 * Invalidate a context in the cache
	 * @param {string} contextId - Context identifier
	 * @param {Object} metadata - Metadata used in the cache key
	 */
	invalidateContext(contextId, metadata = {}) {
		const cacheKey = this._getCacheKey(contextId, metadata);
		this.cache.delete(cacheKey);
		this.stats.invalidations++;
	}

	/**
	 * Get cached data associated with a specific key.
	 * Increments cache hit stats if found.
	 * @param {string} key - The cache key.
	 * @returns {any | undefined} The cached data or undefined if not found/expired.
	 */
	getCachedData(key) {
		const cached = this.cache.get(key);
		if (cached !== undefined) {
			// Check for undefined specifically, as null/false might be valid cached values
			this.stats.hits++;
			return cached;
		}
		this.stats.misses++;
		return undefined;
	}

	/**
	 * Set data in the cache with a specific key.
	 * @param {string} key - The cache key.
	 * @param {any} data - The data to cache.
	 */
	setCachedData(key, data) {
		this.cache.set(key, data);
	}

	/**
	 * Invalidate a specific cache key.
	 * Increments invalidation stats.
	 * @param {string} key - The cache key to invalidate.
	 */
	invalidateCacheKey(key) {
		this.cache.delete(key);
		this.stats.invalidations++;
	}

	/**
	 * Get cache statistics
	 * @returns {Object} Cache statistics
	 */
	getStats() {
		return {
			hits: this.stats.hits,
			misses: this.stats.misses,
			invalidations: this.stats.invalidations,
			size: this.cache.size,
			maxSize: this.config.maxCacheSize,
			ttl: this.config.ttl
		};
	}

	/**
	 * Generate a cache key from context ID and metadata
	 * @private
	 * @deprecated No longer used for direct cache key generation outside the manager.
	 * Prefer generating specific keys in calling functions.
	 */
	_getCacheKey(contextId, metadata) {
		// Kept for potential backward compatibility or internal use if needed later.
		return `${contextId}:${JSON.stringify(metadata)}`;
	}
}

// Export a singleton instance with default config
export const contextManager = new ContextManager();
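A short usage sketch of the singleton and the key-based helpers defined above; the cache key string and the cached value are illustrative, everything else is the API shown in this file.

```javascript
// Hedged sketch: using the exported singleton's key-based cache helpers.
import { contextManager } from './context-manager.js';

const cacheKey = 'listTasks:all'; // hypothetical key chosen by the caller

let tasks = contextManager.getCachedData(cacheKey);
if (tasks === undefined) {
	tasks = { tasks: [] }; // stand-in for a freshly computed result
	contextManager.setCachedData(cacheKey, tasks);
}

// After the underlying data changes, drop the stale entry and inspect stats.
contextManager.invalidateCacheKey(cacheKey);
console.log(contextManager.getStats()); // { hits, misses, invalidations, size, maxSize, ttl }
```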
@@ -5,11 +5,14 @@

import { addDependency } from '../../../../scripts/modules/dependency-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for addDependency with error handling.
 *
 * @param {Object} args - Command arguments
 * @param {string|number} args.id - Task ID to add dependency to
 * @param {string|number} args.dependsOn - Task ID that will become a dependency

@@ -19,67 +22,75 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules

 * @returns {Promise<Object>} - Result object with success status and data/error information
 */
export async function addDependencyDirect(args, log) {
	try {
		log.info(`Adding dependency with args: ${JSON.stringify(args)}`);

		// Validate required parameters
		if (!args.id) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Task ID (id) is required'
				}
			};
		}

		if (!args.dependsOn) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Dependency ID (dependsOn) is required'
				}
			};
		}

		// Find the tasks.json path
		const tasksPath = findTasksJsonPath(args, log);

		// Format IDs for the core function
		const taskId =
			args.id.includes && args.id.includes('.')
				? args.id
				: parseInt(args.id, 10);
		const dependencyId =
			args.dependsOn.includes && args.dependsOn.includes('.')
				? args.dependsOn
				: parseInt(args.dependsOn, 10);

		log.info(
			`Adding dependency: task ${taskId} will depend on ${dependencyId}`
		);

		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Call the core function
		await addDependency(tasksPath, taskId, dependencyId);

		// Restore normal logging
		disableSilentMode();

		return {
			success: true,
			data: {
				message: `Successfully added dependency: Task ${taskId} now depends on ${dependencyId}`,
				taskId: taskId,
				dependencyId: dependencyId
			}
		};
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error in addDependencyDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message
			}
		};
	}
}
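For context, a minimal sketch of invoking this wrapper directly; the import path is assumed, the logger object is a stand-in providing only the `info` and `error` methods used above, and the argument values are illustrative.

```javascript
// Hedged sketch: calling the direct function outside the MCP server.
import { addDependencyDirect } from './add-dependency.js'; // assumed file name

const log = {
	info: (msg) => console.log(msg),
	error: (msg) => console.error(msg)
};

const result = await addDependencyDirect({ id: '5', dependsOn: '3' }, log);

if (result.success) {
	console.log(result.data.message);
} else {
	console.error(`${result.error.code}: ${result.error.message}`);
}
```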
@@ -4,7 +4,10 @@

import { addSubtask } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';

/**
 * Add a subtask to an existing task

@@ -23,106 +26,118 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules

 * @returns {Promise<{success: boolean, data?: Object, error?: string}>}
 */
export async function addSubtaskDirect(args, log) {
	try {
		log.info(`Adding subtask with args: ${JSON.stringify(args)}`);

		if (!args.id) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Parent task ID is required'
				}
			};
		}

		// Either taskId or title must be provided
		if (!args.taskId && !args.title) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Either taskId or title must be provided'
				}
			};
		}

		// Find the tasks.json path
		const tasksPath = findTasksJsonPath(args, log);

		// Parse dependencies if provided
		let dependencies = [];
		if (args.dependencies) {
			dependencies = args.dependencies.split(',').map((id) => {
				// Handle both regular IDs and dot notation
				return id.includes('.') ? id.trim() : parseInt(id.trim(), 10);
			});
		}

		// Convert existingTaskId to a number if provided
		const existingTaskId = args.taskId ? parseInt(args.taskId, 10) : null;

		// Convert parent ID to a number
		const parentId = parseInt(args.id, 10);

		// Determine if we should generate files
		const generateFiles = !args.skipGenerate;

		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Case 1: Convert existing task to subtask
		if (existingTaskId) {
			log.info(`Converting task ${existingTaskId} to a subtask of ${parentId}`);
			const result = await addSubtask(
				tasksPath,
				parentId,
				existingTaskId,
				null,
				generateFiles
			);

			// Restore normal logging
			disableSilentMode();

			return {
				success: true,
				data: {
					message: `Task ${existingTaskId} successfully converted to a subtask of task ${parentId}`,
					subtask: result
				}
			};
		}
		// Case 2: Create new subtask
		else {
			log.info(`Creating new subtask for parent task ${parentId}`);

			const newSubtaskData = {
				title: args.title,
				description: args.description || '',
				details: args.details || '',
				status: args.status || 'pending',
				dependencies: dependencies
			};

			const result = await addSubtask(
				tasksPath,
				parentId,
				null,
				newSubtaskData,
				generateFiles
			);

			// Restore normal logging
			disableSilentMode();

			return {
				success: true,
				data: {
					message: `New subtask ${parentId}.${result.id} successfully created`,
					subtask: result
				}
			};
		}
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error in addSubtaskDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message
			}
		};
	}
}
@@ -5,9 +5,19 @@
import { addTask } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';
import {
	getAnthropicClientForMCP,
	getModelConfig
} from '../utils/ai-client-utils.js';
import {
	_buildAddTaskPrompt,
	parseTaskJsonResponse,
	_handleAnthropicStream
} from '../../../../scripts/modules/ai-services.js';

/**
 * Direct function wrapper for adding a new task with error handling.
@@ -24,153 +34,162 @@ import { _buildAddTaskPrompt, parseTaskJsonResponse, _handleAnthropicStream } fr
 * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }
 */
export async function addTaskDirect(args, log, context = {}) {
	try {
		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Find the tasks.json path
		const tasksPath = findTasksJsonPath(args, log);

		// Check required parameters
		if (!args.prompt) {
			log.error('Missing required parameter: prompt');
			disableSilentMode();
			return {
				success: false,
				error: {
					code: 'MISSING_PARAMETER',
					message: 'The prompt parameter is required for adding a task'
				}
			};
		}

		// Extract and prepare parameters
		const prompt = args.prompt;
		const dependencies = Array.isArray(args.dependencies)
			? args.dependencies
			: args.dependencies
				? String(args.dependencies)
						.split(',')
						.map((id) => parseInt(id.trim(), 10))
				: [];
		const priority = args.priority || 'medium';

		log.info(
			`Adding new task with prompt: "${prompt}", dependencies: [${dependencies.join(', ')}], priority: ${priority}`
		);

		// Extract context parameters for advanced functionality
		// Commenting out reportProgress extraction
		// const { reportProgress, session } = context;
		const { session } = context; // Keep session

		// Initialize AI client with session environment
		let localAnthropic;
		try {
			localAnthropic = getAnthropicClientForMCP(session, log);
		} catch (error) {
			log.error(`Failed to initialize Anthropic client: ${error.message}`);
			disableSilentMode();
			return {
				success: false,
				error: {
					code: 'AI_CLIENT_ERROR',
					message: `Cannot initialize AI client: ${error.message}`
				}
			};
		}

		// Get model configuration from session
		const modelConfig = getModelConfig(session);

		// Read existing tasks to provide context
		let tasksData;
		try {
			const fs = await import('fs');
			tasksData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
		} catch (error) {
			log.warn(`Could not read existing tasks for context: ${error.message}`);
			tasksData = { tasks: [] };
		}

		// Build prompts for AI
		const { systemPrompt, userPrompt } = _buildAddTaskPrompt(
			prompt,
			tasksData.tasks
		);

		// Make the AI call using the streaming helper
		let responseText;
		try {
			responseText = await _handleAnthropicStream(
				localAnthropic,
				{
					model: modelConfig.model,
					max_tokens: modelConfig.maxTokens,
					temperature: modelConfig.temperature,
					messages: [{ role: 'user', content: userPrompt }],
					system: systemPrompt
				},
				{
					// reportProgress: context.reportProgress, // Commented out to prevent Cursor stroking out
					mcpLog: log
				}
			);
		} catch (error) {
			log.error(`AI processing failed: ${error.message}`);
			disableSilentMode();
			return {
				success: false,
				error: {
					code: 'AI_PROCESSING_ERROR',
					message: `Failed to generate task with AI: ${error.message}`
				}
			};
		}

		// Parse the AI response
		let taskDataFromAI;
		try {
			taskDataFromAI = parseTaskJsonResponse(responseText);
		} catch (error) {
			log.error(`Failed to parse AI response: ${error.message}`);
			disableSilentMode();
			return {
				success: false,
				error: {
					code: 'RESPONSE_PARSING_ERROR',
					message: `Failed to parse AI response: ${error.message}`
				}
			};
		}

		// Call the addTask function with 'json' outputFormat to prevent console output when called via MCP
		const newTaskId = await addTask(
			tasksPath,
			prompt,
			dependencies,
			priority,
			{
				// reportProgress, // Commented out
				mcpLog: log,
				session,
				taskDataFromAI // Pass the parsed AI result
			},
			'json'
		);

		// Restore normal logging
		disableSilentMode();

		return {
			success: true,
			data: {
				taskId: newTaskId,
				message: `Successfully added new task #${newTaskId}`
			}
		};
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error in addTaskDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'ADD_TASK_ERROR',
				message: error.message
			}
		};
	}
}
@@ -4,7 +4,12 @@
import { analyzeTaskComplexity } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
	enableSilentMode,
	disableSilentMode,
	isSilentMode,
	readJSON
} from '../../../../scripts/modules/utils.js';
import fs from 'fs';
import path from 'path';
@@ -22,135 +27,142 @@ import path from 'path';
 * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
 */
export async function analyzeTaskComplexityDirect(args, log, context = {}) {
	const { session } = context; // Only extract session, not reportProgress

	try {
		log.info(`Analyzing task complexity with args: ${JSON.stringify(args)}`);

		// Find the tasks.json path
		const tasksPath = findTasksJsonPath(args, log);

		// Determine output path
		let outputPath = args.output || 'scripts/task-complexity-report.json';
		if (!path.isAbsolute(outputPath) && args.projectRoot) {
			outputPath = path.join(args.projectRoot, outputPath);
		}

		log.info(`Analyzing task complexity from: ${tasksPath}`);
		log.info(`Output report will be saved to: ${outputPath}`);

		if (args.research) {
			log.info('Using Perplexity AI for research-backed complexity analysis');
		}

		// Create options object for analyzeTaskComplexity
		const options = {
			file: tasksPath,
			output: outputPath,
			model: args.model,
			threshold: args.threshold,
			research: args.research === true
		};

		// Enable silent mode to prevent console logs from interfering with JSON response
		const wasSilent = isSilentMode();
		if (!wasSilent) {
			enableSilentMode();
		}

		// Create a logWrapper that matches the expected mcpLog interface as specified in utilities.mdc
		const logWrapper = {
			info: (message, ...args) => log.info(message, ...args),
			warn: (message, ...args) => log.warn(message, ...args),
			error: (message, ...args) => log.error(message, ...args),
			debug: (message, ...args) => log.debug && log.debug(message, ...args),
			success: (message, ...args) => log.info(message, ...args) // Map success to info
		};

		try {
			// Call the core function with session and logWrapper as mcpLog
			await analyzeTaskComplexity(options, {
				session,
				mcpLog: logWrapper // Use the wrapper instead of passing log directly
			});
		} catch (error) {
			log.error(`Error in analyzeTaskComplexity: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'ANALYZE_ERROR',
					message: `Error running complexity analysis: ${error.message}`
				}
			};
		} finally {
			// Always restore normal logging in finally block, but only if we enabled it
			if (!wasSilent) {
				disableSilentMode();
			}
		}

		// Verify the report file was created
		if (!fs.existsSync(outputPath)) {
			return {
				success: false,
				error: {
					code: 'ANALYZE_ERROR',
					message: 'Analysis completed but no report file was created'
				}
			};
		}

		// Read the report file
		let report;
		try {
			report = JSON.parse(fs.readFileSync(outputPath, 'utf8'));

			// Important: Handle different report formats
			// The core function might return an array or an object with a complexityAnalysis property
			const analysisArray = Array.isArray(report)
				? report
				: report.complexityAnalysis || [];

			// Count tasks by complexity
			const highComplexityTasks = analysisArray.filter(
				(t) => t.complexityScore >= 8
			).length;
			const mediumComplexityTasks = analysisArray.filter(
				(t) => t.complexityScore >= 5 && t.complexityScore < 8
			).length;
			const lowComplexityTasks = analysisArray.filter(
				(t) => t.complexityScore < 5
			).length;

			return {
				success: true,
				data: {
					message: `Task complexity analysis complete. Report saved to ${outputPath}`,
					reportPath: outputPath,
					reportSummary: {
						taskCount: analysisArray.length,
						highComplexityTasks,
						mediumComplexityTasks,
						lowComplexityTasks
					}
				}
			};
		} catch (parseError) {
			log.error(`Error parsing report file: ${parseError.message}`);
			return {
				success: false,
				error: {
					code: 'REPORT_PARSE_ERROR',
					message: `Error parsing complexity report: ${parseError.message}`
				}
			};
		}
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		if (isSilentMode()) {
			disableSilentMode();
		}

		log.error(`Error in analyzeTaskComplexityDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message
			}
		};
	}
}
@@ -12,21 +12,21 @@ import { contextManager } from '../context-manager.js';
 * @returns {Object} - Cache statistics
 */
export async function getCacheStatsDirect(args, log) {
	try {
		log.info('Retrieving cache statistics');
		const stats = contextManager.getStats();
		return {
			success: true,
			data: stats
		};
	} catch (error) {
		log.error(`Error getting cache stats: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CACHE_STATS_ERROR',
				message: error.message || 'Unknown error occurred'
			}
		};
	}
}
@@ -4,7 +4,10 @@
import { clearSubtasks } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';
import fs from 'fs';

/**
@@ -18,95 +21,96 @@ import fs from 'fs';
 * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
 */
export async function clearSubtasksDirect(args, log) {
	try {
		log.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);

		// Either id or all must be provided
		if (!args.id && !args.all) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message:
						'Either task IDs with id parameter or all parameter must be provided'
				}
			};
		}

		// Find the tasks.json path
		const tasksPath = findTasksJsonPath(args, log);

		// Check if tasks.json exists
		if (!fs.existsSync(tasksPath)) {
			return {
				success: false,
				error: {
					code: 'FILE_NOT_FOUND_ERROR',
					message: `Tasks file not found at ${tasksPath}`
				}
			};
		}

		let taskIds;

		// If all is specified, get all task IDs
		if (args.all) {
			log.info('Clearing subtasks from all tasks');
			const data = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
			if (!data || !data.tasks || data.tasks.length === 0) {
				return {
					success: false,
					error: {
						code: 'INPUT_VALIDATION_ERROR',
						message: 'No valid tasks found in the tasks file'
					}
				};
			}
			taskIds = data.tasks.map((t) => t.id).join(',');
		} else {
			// Use the provided task IDs
			taskIds = args.id;
		}

		log.info(`Clearing subtasks from tasks: ${taskIds}`);

		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Call the core function
		clearSubtasks(tasksPath, taskIds);

		// Restore normal logging
		disableSilentMode();

		// Read the updated data to provide a summary
		const updatedData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));
		const taskIdArray = taskIds.split(',').map((id) => parseInt(id.trim(), 10));

		// Build a summary of what was done
		const clearedTasksCount = taskIdArray.length;
		const taskSummary = taskIdArray.map((id) => {
			const task = updatedData.tasks.find((t) => t.id === id);
			return task ? { id, title: task.title } : { id, title: 'Task not found' };
		});

		return {
			success: true,
			data: {
				message: `Successfully cleared subtasks from ${clearedTasksCount} task(s)`,
				tasksCleared: taskSummary
			}
		};
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error in clearSubtasksDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message
			}
		};
	}
}
@@ -3,119 +3,131 @@
 * Direct function implementation for displaying complexity analysis report
 */

import {
	readComplexityReport,
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import path from 'path';

/**
 * Direct function wrapper for displaying the complexity report with error handling and caching.
 *
 * @param {Object} args - Command arguments containing file path option
 * @param {Object} log - Logger object
 * @returns {Promise<Object>} - Result object with success status and data/error information
 */
export async function complexityReportDirect(args, log) {
	try {
		log.info(`Getting complexity report with args: ${JSON.stringify(args)}`);

		// Get tasks file path to determine project root for the default report location
		let tasksPath;
		try {
			tasksPath = findTasksJsonPath(args, log);
		} catch (error) {
			log.warn(
				`Tasks file not found, using current directory: ${error.message}`
			);
			// Continue with default or specified report path
		}

		// Get report file path from args or use default
		const reportPath =
			args.file ||
			path.join(process.cwd(), 'scripts', 'task-complexity-report.json');

		log.info(`Looking for complexity report at: ${reportPath}`);

		// Generate cache key based on report path
		const cacheKey = `complexityReport:${reportPath}`;

		// Define the core action function to read the report
		const coreActionFn = async () => {
			try {
				// Enable silent mode to prevent console logs from interfering with JSON response
				enableSilentMode();

				const report = readComplexityReport(reportPath);

				// Restore normal logging
				disableSilentMode();

				if (!report) {
					log.warn(`No complexity report found at ${reportPath}`);
					return {
						success: false,
						error: {
							code: 'FILE_NOT_FOUND_ERROR',
							message: `No complexity report found at ${reportPath}. Run 'analyze-complexity' first.`
						}
					};
				}

				return {
					success: true,
					data: {
						report,
						reportPath
					}
				};
			} catch (error) {
				// Make sure to restore normal logging even if there's an error
				disableSilentMode();

				log.error(`Error reading complexity report: ${error.message}`);
				return {
					success: false,
					error: {
						code: 'READ_ERROR',
						message: error.message
					}
				};
			}
		};

		// Use the caching utility
		try {
			const result = await getCachedOrExecute({
				cacheKey,
				actionFn: coreActionFn,
				log
			});
			log.info(
				`complexityReportDirect completed. From cache: ${result.fromCache}`
			);
			return result; // Returns { success, data/error, fromCache }
		} catch (error) {
			// Catch unexpected errors from getCachedOrExecute itself
			// Ensure silent mode is disabled
			disableSilentMode();

			log.error(
				`Unexpected error during getCachedOrExecute for complexityReport: ${error.message}`
			);
			return {
				success: false,
				error: {
					code: 'UNEXPECTED_ERROR',
					message: error.message
				},
				fromCache: false
			};
		}
	} catch (error) {
		// Ensure silent mode is disabled if an outer error occurs
		disableSilentMode();

		log.error(`Error in complexityReportDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'UNEXPECTED_ERROR',
				message: error.message
			},
			fromCache: false
		};
	}
}
@@ -3,7 +3,11 @@
 */

import { expandAllTasks } from '../../../../scripts/modules/task-manager.js';
import {
	enableSilentMode,
	disableSilentMode,
	isSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import { getAnthropicClientForMCP } from '../utils/ai-client-utils.js';
import path from 'path';
@@ -23,98 +27,100 @@ import fs from 'fs';
 * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
 */
export async function expandAllTasksDirect(args, log, context = {}) {
	const { session } = context; // Only extract session, not reportProgress

	try {
		log.info(`Expanding all tasks with args: ${JSON.stringify(args)}`);

		// Enable silent mode early to prevent any console output
		enableSilentMode();

		try {
			// Find the tasks.json path
			const tasksPath = findTasksJsonPath(args, log);

			// Parse parameters
			const numSubtasks = args.num ? parseInt(args.num, 10) : undefined;
			const useResearch = args.research === true;
			const additionalContext = args.prompt || '';
			const forceFlag = args.force === true;

			log.info(
				`Expanding all tasks with ${numSubtasks || 'default'} subtasks each...`
			);

			if (useResearch) {
				log.info('Using Perplexity AI for research-backed subtask generation');

				// Initialize AI client for research-backed expansion
				try {
					await getAnthropicClientForMCP(session, log);
				} catch (error) {
					// Ensure silent mode is disabled before returning error
					disableSilentMode();

					log.error(`Failed to initialize AI client: ${error.message}`);
					return {
						success: false,
						error: {
							code: 'AI_CLIENT_ERROR',
							message: `Cannot initialize AI client: ${error.message}`
						}
					};
				}
			}

			if (additionalContext) {
				log.info(`Additional context: "${additionalContext}"`);
			}
			if (forceFlag) {
				log.info('Force regeneration of subtasks is enabled');
			}

			// Call the core function with session context for AI operations
			// and outputFormat as 'json' to prevent UI elements
			const result = await expandAllTasks(
				tasksPath,
				numSubtasks,
				useResearch,
				additionalContext,
				forceFlag,
				{ mcpLog: log, session },
				'json' // Use JSON output format to prevent UI elements
			);

			// The expandAllTasks function now returns a result object
			return {
				success: true,
				data: {
					message: 'Successfully expanded all pending tasks with subtasks',
					details: {
						numSubtasks: numSubtasks,
						research: useResearch,
						prompt: additionalContext,
						force: forceFlag,
						tasksExpanded: result.expandedCount,
						totalEligibleTasks: result.tasksToExpand
					}
				}
			};
		} finally {
			// Restore normal logging in finally block to ensure it runs even if there's an error
			disableSilentMode();
		}
	} catch (error) {
		// Ensure silent mode is disabled if an error occurs
		if (isSilentMode()) {
			disableSilentMode();
		}

		log.error(`Error in expandAllTasksDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message
			}
		};
	}
}
@@ -4,9 +4,18 @@
 */

import { expandTask } from '../../../../scripts/modules/task-manager.js';
import {
	readJSON,
	writeJSON,
	enableSilentMode,
	disableSilentMode,
	isSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
	getAnthropicClientForMCP,
	getModelConfig
} from '../utils/ai-client-utils.js';
import path from 'path';
import fs from 'fs';
@@ -19,231 +28,248 @@ import fs from 'fs';
 * @returns {Promise<Object>} - Task expansion result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
 */
export async function expandTaskDirect(args, log, context = {}) {
	const { session } = context;

	// Log session root data for debugging
	log.info(
		`Session data in expandTaskDirect: ${JSON.stringify({
			hasSession: !!session,
			sessionKeys: session ? Object.keys(session) : [],
			roots: session?.roots,
			rootsStr: JSON.stringify(session?.roots)
		})}`
	);

	let tasksPath;
	try {
		// If a direct file path is provided, use it directly
		if (args.file && fs.existsSync(args.file)) {
			log.info(
				`[expandTaskDirect] Using explicitly provided tasks file: ${args.file}`
			);
			tasksPath = args.file;
		} else {
			// Find the tasks path through standard logic
			log.info(
				`[expandTaskDirect] No direct file path provided or file not found at ${args.file}, searching using findTasksJsonPath`
			);
			tasksPath = findTasksJsonPath(args, log);
		}
	} catch (error) {
		log.error(
			`[expandTaskDirect] Error during tasksPath determination: ${error.message}`
		);

		// Include session roots information in error
		const sessionRootsInfo = session
			? `\nSession.roots: ${JSON.stringify(session.roots)}\n` +
				`Current Working Directory: ${process.cwd()}\n` +
				`Args.projectRoot: ${args.projectRoot}\n` +
				`Args.file: ${args.file}\n`
			: '\nSession object not available';

		return {
			success: false,
			error: {
				code: 'FILE_NOT_FOUND_ERROR',
				message: `Error determining tasksPath: ${error.message}${sessionRootsInfo}`
			},
			fromCache: false
		};
	}

	log.info(`[expandTaskDirect] Determined tasksPath: ${tasksPath}`);

	// Validate task ID
	const taskId = args.id ? parseInt(args.id, 10) : null;
	if (!taskId) {
		log.error('Task ID is required');
		return {
			success: false,
			error: {
				code: 'INPUT_VALIDATION_ERROR',
				message: 'Task ID is required'
			},
			fromCache: false
		};
	}

	// Process other parameters
	const numSubtasks = args.num ? parseInt(args.num, 10) : undefined;
	const useResearch = args.research === true;
	const additionalContext = args.prompt || '';

	// Initialize AI client if needed (for expandTask function)
	try {
		// This ensures the AI client is available by checking it
		if (useResearch) {
			log.info('Verifying AI client for research-backed expansion');
			await getAnthropicClientForMCP(session, log);
		}
	} catch (error) {
		log.error(`Failed to initialize AI client: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'AI_CLIENT_ERROR',
				message: `Cannot initialize AI client: ${error.message}`
			},
			fromCache: false
		};
	}

	try {
		log.info(
			`[expandTaskDirect] Expanding task ${taskId} into ${numSubtasks || 'default'} subtasks. Research: ${useResearch}`
		);

		// Read tasks data
		log.info(`[expandTaskDirect] Attempting to read JSON from: ${tasksPath}`);
		const data = readJSON(tasksPath);
		log.info(
			`[expandTaskDirect] Result of readJSON: ${data ? 'Data read successfully' : 'readJSON returned null or undefined'}`
		);

		if (!data || !data.tasks) {
			log.error(
				`[expandTaskDirect] readJSON failed or returned invalid data for path: ${tasksPath}`
			);
			return {
				success: false,
				error: {
					code: 'INVALID_TASKS_FILE',
					message: `No valid tasks found in ${tasksPath}. readJSON returned: ${JSON.stringify(data)}`
				},
				fromCache: false
			};
		}

		// Find the specific task
		log.info(`[expandTaskDirect] Searching for task ID ${taskId} in data`);
		const task = data.tasks.find((t) => t.id === taskId);
		log.info(`[expandTaskDirect] Task found: ${task ? 'Yes' : 'No'}`);

		if (!task) {
			return {
				success: false,
				error: {
					code: 'TASK_NOT_FOUND',
					message: `Task with ID ${taskId} not found`
				},
				fromCache: false
			};
		}

		// Check if task is completed
		if (task.status === 'done' || task.status === 'completed') {
			return {
				success: false,
				error: {
					code: 'TASK_COMPLETED',
					message: `Task ${taskId} is already marked as ${task.status} and cannot be expanded`
				},
				fromCache: false
			};
		}

		// Check for existing subtasks
		const hasExistingSubtasks = task.subtasks && task.subtasks.length > 0;

		// If the task already has subtasks, just return it (matching core behavior)
		if (hasExistingSubtasks) {
			log.info(`Task ${taskId} already has ${task.subtasks.length} subtasks`);
			return {
				success: true,
				data: {
					task,
					subtasksAdded: 0,
					hasExistingSubtasks
				},
				fromCache: false
			};
		}

		// Keep a copy of the task before modification
		const originalTask = JSON.parse(JSON.stringify(task));

		// Tracking subtasks count before expansion
		const subtasksCountBefore = task.subtasks ? task.subtasks.length : 0;

		// Create a backup of the tasks.json file
		const backupPath = path.join(path.dirname(tasksPath), 'tasks.json.bak');
		fs.copyFileSync(tasksPath, backupPath);

		// Directly modify the data instead of calling the CLI function
		if (!task.subtasks) {
			task.subtasks = [];
		}

		// Save tasks.json with potentially empty subtasks array
		writeJSON(tasksPath, data);

		// Process the request
		try {
			// Enable silent mode to prevent console logs from interfering with JSON response
			enableSilentMode();

			// Call expandTask with session context to ensure AI client is properly initialized
			const result = await expandTask(
				tasksPath,
				taskId,
				numSubtasks,
				useResearch,
				additionalContext,
				{ mcpLog: log, session } // Only pass mcpLog and session, NOT reportProgress
			);

			// Restore normal logging
			disableSilentMode();

			// Read the updated data
			const updatedData = readJSON(tasksPath);
			const updatedTask = updatedData.tasks.find((t) => t.id === taskId);

			// Calculate how many subtasks were added
			const subtasksAdded = updatedTask.subtasks
				? updatedTask.subtasks.length - subtasksCountBefore
				: 0;

			// Return the result
			log.info(
				`Successfully expanded task ${taskId} with ${subtasksAdded} new subtasks`
			);
			return {
				success: true,
				data: {
					task: updatedTask,
					subtasksAdded,
					hasExistingSubtasks
				},
				fromCache: false
			};
		} catch (error) {
			// Make sure to restore normal logging even if there's an error
			disableSilentMode();

			log.error(`Error expanding task: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'CORE_FUNCTION_ERROR',
					message: error.message || 'Failed to expand task'
				},
				fromCache: false
			};
		}
	} catch (error) {
		log.error(`Error expanding task: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message || 'Failed to expand task'
			},
			fromCache: false
		};
	}
}
@@ -4,7 +4,10 @@

import { fixDependenciesCommand } from '../../../../scripts/modules/dependency-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
  enableSilentMode,
  disableSilentMode
} from '../../../../scripts/modules/utils.js';
import fs from 'fs';

/**
@@ -16,50 +19,50 @@ import fs from 'fs';
 * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
 */
export async function fixDependenciesDirect(args, log) {
  try {
    log.info(`Fixing invalid dependencies in tasks...`);

    // Find the tasks.json path
    const tasksPath = findTasksJsonPath(args, log);

    // Verify the file exists
    if (!fs.existsSync(tasksPath)) {
      return {
        success: false,
        error: {
          code: 'FILE_NOT_FOUND',
          message: `Tasks file not found at ${tasksPath}`
        }
      };
    }

    // Enable silent mode to prevent console logs from interfering with JSON response
    enableSilentMode();

    // Call the original command function
    await fixDependenciesCommand(tasksPath);

    // Restore normal logging
    disableSilentMode();

    return {
      success: true,
      data: {
        message: 'Dependencies fixed successfully',
        tasksPath
      }
    };
  } catch (error) {
    // Make sure to restore normal logging even if there's an error
    disableSilentMode();

    log.error(`Error fixing dependencies: ${error.message}`);
    return {
      success: false,
      error: {
        code: 'FIX_DEPENDENCIES_ERROR',
        message: error.message
      }
    };
  }
}
@@ -4,84 +4,91 @@
 */

import { generateTaskFiles } from '../../../../scripts/modules/task-manager.js';
import {
  enableSilentMode,
  disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import path from 'path';

/**
 * Direct function wrapper for generateTaskFiles with error handling.
 *
 * @param {Object} args - Command arguments containing file and output path options.
 * @param {Object} log - Logger object.
 * @returns {Promise<Object>} - Result object with success status and data/error information.
 */
export async function generateTaskFilesDirect(args, log) {
  try {
    log.info(`Generating task files with args: ${JSON.stringify(args)}`);

    // Get tasks file path
    let tasksPath;
    try {
      tasksPath = findTasksJsonPath(args, log);
    } catch (error) {
      log.error(`Error finding tasks file: ${error.message}`);
      return {
        success: false,
        error: { code: 'TASKS_FILE_ERROR', message: error.message },
        fromCache: false
      };
    }

    // Get output directory (defaults to the same directory as the tasks file)
    let outputDir = args.output;
    if (!outputDir) {
      outputDir = path.dirname(tasksPath);
    }

    log.info(`Generating task files from ${tasksPath} to ${outputDir}`);

    // Execute core generateTaskFiles function in a separate try/catch
    try {
      // Enable silent mode to prevent logs from being written to stdout
      enableSilentMode();

      // The function is synchronous despite being awaited elsewhere
      generateTaskFiles(tasksPath, outputDir);

      // Restore normal logging after task generation
      disableSilentMode();
    } catch (genError) {
      // Make sure to restore normal logging even if there's an error
      disableSilentMode();

      log.error(`Error in generateTaskFiles: ${genError.message}`);
      return {
        success: false,
        error: { code: 'GENERATE_FILES_ERROR', message: genError.message },
        fromCache: false
      };
    }

    // Return success with file paths
    return {
      success: true,
      data: {
        message: `Successfully generated task files`,
        tasksPath,
        outputDir,
        taskFiles:
          'Individual task files have been generated in the output directory'
      },
      fromCache: false // This operation always modifies state and should never be cached
    };
  } catch (error) {
    // Make sure to restore normal logging if an outer error occurs
    disableSilentMode();

    log.error(`Error generating task files: ${error.message}`);
    return {
      success: false,
      error: {
        code: 'GENERATE_TASKS_ERROR',
        message: error.message || 'Unknown error generating task files'
      },
      fromCache: false
    };
  }
}
@@ -6,7 +6,10 @@
import { listTasks } from '../../../../scripts/modules/task-manager.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
  enableSilentMode,
  disableSilentMode
} from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for listTasks with error handling and caching.
@@ -16,68 +19,102 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules
 * @returns {Promise<Object>} - Task list result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }.
 */
export async function listTasksDirect(args, log) {
  let tasksPath;
  try {
    // Find the tasks path first - needed for cache key and execution
    tasksPath = findTasksJsonPath(args, log);
  } catch (error) {
    if (error.code === 'TASKS_FILE_NOT_FOUND') {
      log.error(`Tasks file not found: ${error.message}`);
      // Return the error structure expected by the calling tool/handler
      return {
        success: false,
        error: { code: error.code, message: error.message },
        fromCache: false
      };
    }
    log.error(`Unexpected error finding tasks file: ${error.message}`);
    // Re-throw for outer catch or return structured error
    return {
      success: false,
      error: { code: 'FIND_TASKS_PATH_ERROR', message: error.message },
      fromCache: false
    };
  }

  // Generate cache key *after* finding tasksPath
  const statusFilter = args.status || 'all';
  const withSubtasks = args.withSubtasks || false;
  const cacheKey = `listTasks:${tasksPath}:${statusFilter}:${withSubtasks}`;

  // Define the action function to be executed on cache miss
  const coreListTasksAction = async () => {
    try {
      // Enable silent mode to prevent console logs from interfering with JSON response
      enableSilentMode();

      log.info(
        `Executing core listTasks function for path: ${tasksPath}, filter: ${statusFilter}, subtasks: ${withSubtasks}`
      );
      const resultData = listTasks(
        tasksPath,
        statusFilter,
        withSubtasks,
        'json'
      );

      if (!resultData || !resultData.tasks) {
        log.error('Invalid or empty response from listTasks core function');
        return {
          success: false,
          error: {
            code: 'INVALID_CORE_RESPONSE',
            message: 'Invalid or empty response from listTasks core function'
          }
        };
      }
      log.info(
        `Core listTasks function retrieved ${resultData.tasks.length} tasks`
      );

      // Restore normal logging
      disableSilentMode();

      return { success: true, data: resultData };
    } catch (error) {
      // Make sure to restore normal logging even if there's an error
      disableSilentMode();

      log.error(`Core listTasks function failed: ${error.message}`);
      return {
        success: false,
        error: {
          code: 'LIST_TASKS_CORE_ERROR',
          message: error.message || 'Failed to list tasks'
        }
      };
    }
  };

  // Use the caching utility
  try {
    const result = await getCachedOrExecute({
      cacheKey,
      actionFn: coreListTasksAction,
      log
    });
    log.info(`listTasksDirect completed. From cache: ${result.fromCache}`);
    return result; // Returns { success, data/error, fromCache }
  } catch (error) {
    // Catch unexpected errors from getCachedOrExecute itself (though unlikely)
    log.error(
      `Unexpected error during getCachedOrExecute for listTasks: ${error.message}`
    );
    console.error(error.stack);
    return {
      success: false,
      error: { code: 'CACHE_UTIL_ERROR', message: error.message },
      fromCache: false
    };
  }
}
@@ -7,7 +7,10 @@ import { findNextTask } from '../../../../scripts/modules/task-manager.js';
import { readJSON } from '../../../../scripts/modules/utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
  enableSilentMode,
  disableSilentMode
} from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for finding the next task to work on with error handling and caching.
@@ -17,106 +20,113 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules
 * @returns {Promise<Object>} - Next task result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
 */
export async function nextTaskDirect(args, log) {
  let tasksPath;
  try {
    // Find the tasks path first - needed for cache key and execution
    tasksPath = findTasksJsonPath(args, log);
  } catch (error) {
    log.error(`Tasks file not found: ${error.message}`);
    return {
      success: false,
      error: {
        code: 'FILE_NOT_FOUND_ERROR',
        message: error.message
      },
      fromCache: false
    };
  }

  // Generate cache key using task path
  const cacheKey = `nextTask:${tasksPath}`;

  // Define the action function to be executed on cache miss
  const coreNextTaskAction = async () => {
    try {
      // Enable silent mode to prevent console logs from interfering with JSON response
      enableSilentMode();

      log.info(`Finding next task from ${tasksPath}`);

      // Read tasks data
      const data = readJSON(tasksPath);
      if (!data || !data.tasks) {
        return {
          success: false,
          error: {
            code: 'INVALID_TASKS_FILE',
            message: `No valid tasks found in ${tasksPath}`
          }
        };
      }

      // Find the next task
      const nextTask = findNextTask(data.tasks);

      if (!nextTask) {
        log.info(
          'No eligible next task found. All tasks are either completed or have unsatisfied dependencies'
        );
        return {
          success: true,
          data: {
            message:
              'No eligible next task found. All tasks are either completed or have unsatisfied dependencies',
            nextTask: null,
            allTasks: data.tasks
          }
        };
      }

      // Restore normal logging
      disableSilentMode();

      // Return the next task data with the full tasks array for reference
      log.info(
        `Successfully found next task ${nextTask.id}: ${nextTask.title}`
      );
      return {
        success: true,
        data: {
          nextTask,
          allTasks: data.tasks
        }
      };
    } catch (error) {
      // Make sure to restore normal logging even if there's an error
      disableSilentMode();

      log.error(`Error finding next task: ${error.message}`);
      return {
        success: false,
        error: {
          code: 'CORE_FUNCTION_ERROR',
          message: error.message || 'Failed to find next task'
        }
      };
    }
  };

  // Use the caching utility
  try {
    const result = await getCachedOrExecute({
      cacheKey,
      actionFn: coreNextTaskAction,
      log
    });
    log.info(`nextTaskDirect completed. From cache: ${result.fromCache}`);
    return result; // Returns { success, data/error, fromCache }
  } catch (error) {
    // Catch unexpected errors from getCachedOrExecute itself
    log.error(
      `Unexpected error during getCachedOrExecute for nextTask: ${error.message}`
    );
    return {
      success: false,
      error: {
        code: 'UNEXPECTED_ERROR',
        message: error.message
      },
      fromCache: false
    };
  }
}
@@ -7,144 +7,172 @@ import path from 'path';
import fs from 'fs';
import { parsePRD } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
  enableSilentMode,
  disableSilentMode
} from '../../../../scripts/modules/utils.js';
import {
  getAnthropicClientForMCP,
  getModelConfig
} from '../utils/ai-client-utils.js';

/**
 * Direct function wrapper for parsing PRD documents and generating tasks.
 *
 * @param {Object} args - Command arguments containing input, numTasks or tasks, and output options.
 * @param {Object} log - Logger object.
 * @param {Object} context - Context object containing session data.
 * @returns {Promise<Object>} - Result object with success status and data/error information.
 */
export async function parsePRDDirect(args, log, context = {}) {
  const { session } = context; // Only extract session, not reportProgress

  try {
    log.info(`Parsing PRD document with args: ${JSON.stringify(args)}`);

    // Initialize AI client for PRD parsing
    let aiClient;
    try {
      aiClient = getAnthropicClientForMCP(session, log);
    } catch (error) {
      log.error(`Failed to initialize AI client: ${error.message}`);
      return {
        success: false,
        error: {
          code: 'AI_CLIENT_ERROR',
          message: `Cannot initialize AI client: ${error.message}`
        },
        fromCache: false
      };
    }

    // Parameter validation and path resolution
    if (!args.input) {
      const errorMessage =
        'No input file specified. Please provide an input PRD document path.';
      log.error(errorMessage);
      return {
        success: false,
        error: { code: 'MISSING_INPUT_FILE', message: errorMessage },
        fromCache: false
      };
    }

    // Resolve input path (relative to project root if provided)
    const projectRoot = args.projectRoot || process.cwd();
    const inputPath = path.isAbsolute(args.input)
      ? args.input
      : path.resolve(projectRoot, args.input);

    // Determine output path
    let outputPath;
    if (args.output) {
      outputPath = path.isAbsolute(args.output)
        ? args.output
        : path.resolve(projectRoot, args.output);
    } else {
      // Default to tasks/tasks.json in the project root
      outputPath = path.resolve(projectRoot, 'tasks', 'tasks.json');
    }

    // Verify input file exists
    if (!fs.existsSync(inputPath)) {
      const errorMessage = `Input file not found: ${inputPath}`;
      log.error(errorMessage);
      return {
        success: false,
        error: { code: 'INPUT_FILE_NOT_FOUND', message: errorMessage },
        fromCache: false
      };
    }

    // Parse number of tasks - handle both string and number values
    let numTasks = 10; // Default
    if (args.numTasks) {
      numTasks =
        typeof args.numTasks === 'string'
          ? parseInt(args.numTasks, 10)
          : args.numTasks;
      if (isNaN(numTasks)) {
        numTasks = 10; // Fallback to default if parsing fails
        log.warn(`Invalid numTasks value: ${args.numTasks}. Using default: 10`);
      }
    }

    log.info(
      `Preparing to parse PRD from ${inputPath} and output to ${outputPath} with ${numTasks} tasks`
    );

    // Create the logger wrapper for proper logging in the core function
    const logWrapper = {
      info: (message, ...args) => log.info(message, ...args),
      warn: (message, ...args) => log.warn(message, ...args),
      error: (message, ...args) => log.error(message, ...args),
      debug: (message, ...args) => log.debug && log.debug(message, ...args),
      success: (message, ...args) => log.info(message, ...args) // Map success to info
    };

    // Get model config from session
    const modelConfig = getModelConfig(session);

    // Enable silent mode to prevent console logs from interfering with JSON response
    enableSilentMode();
    try {
      // Execute core parsePRD function with AI client
      await parsePRD(
        inputPath,
        outputPath,
        numTasks,
        {
          mcpLog: logWrapper,
          session
        },
        aiClient,
        modelConfig
      );

      // Since parsePRD doesn't return a value but writes to a file, we'll read the result
      // to return it to the caller
      if (fs.existsSync(outputPath)) {
        const tasksData = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
        log.info(
          `Successfully parsed PRD and generated ${tasksData.tasks?.length || 0} tasks`
        );

        return {
          success: true,
          data: {
            message: `Successfully generated ${tasksData.tasks?.length || 0} tasks from PRD`,
            taskCount: tasksData.tasks?.length || 0,
            outputPath
          },
          fromCache: false // This operation always modifies state and should never be cached
        };
      } else {
        const errorMessage = `Tasks file was not created at ${outputPath}`;
        log.error(errorMessage);
        return {
          success: false,
          error: { code: 'OUTPUT_FILE_NOT_CREATED', message: errorMessage },
          fromCache: false
        };
      }
    } finally {
      // Always restore normal logging
      disableSilentMode();
    }
  } catch (error) {
    // Make sure to restore normal logging even if there's an error
    disableSilentMode();

    log.error(`Error parsing PRD: ${error.message}`);
    return {
      success: false,
      error: {
        code: 'PARSE_PRD_ERROR',
        message: error.message || 'Unknown error parsing PRD'
      },
      fromCache: false
    };
  }
}
@@ -4,7 +4,10 @@

import { removeDependency } from '../../../../scripts/modules/dependency-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
  enableSilentMode,
  disableSilentMode
} from '../../../../scripts/modules/utils.js';

/**
 * Remove a dependency from a task
@@ -17,67 +20,75 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules
 * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
 */
export async function removeDependencyDirect(args, log) {
  try {
    log.info(`Removing dependency with args: ${JSON.stringify(args)}`);

    // Validate required parameters
    if (!args.id) {
      return {
        success: false,
        error: {
          code: 'INPUT_VALIDATION_ERROR',
          message: 'Task ID (id) is required'
        }
      };
    }

    if (!args.dependsOn) {
      return {
        success: false,
        error: {
          code: 'INPUT_VALIDATION_ERROR',
          message: 'Dependency ID (dependsOn) is required'
        }
      };
    }

    // Find the tasks.json path
    const tasksPath = findTasksJsonPath(args, log);

    // Format IDs for the core function
    const taskId =
      args.id.includes && args.id.includes('.')
        ? args.id
        : parseInt(args.id, 10);
    const dependencyId =
      args.dependsOn.includes && args.dependsOn.includes('.')
        ? args.dependsOn
        : parseInt(args.dependsOn, 10);

    log.info(
      `Removing dependency: task ${taskId} no longer depends on ${dependencyId}`
    );

    // Enable silent mode to prevent console logs from interfering with JSON response
    enableSilentMode();

    // Call the core function
    await removeDependency(tasksPath, taskId, dependencyId);

    // Restore normal logging
    disableSilentMode();

    return {
      success: true,
      data: {
        message: `Successfully removed dependency: Task ${taskId} no longer depends on ${dependencyId}`,
        taskId: taskId,
        dependencyId: dependencyId
      }
    };
  } catch (error) {
    // Make sure to restore normal logging even if there's an error
    disableSilentMode();

    log.error(`Error in removeDependencyDirect: ${error.message}`);
    return {
      success: false,
      error: {
        code: 'CORE_FUNCTION_ERROR',
        message: error.message
      }
    };
  }
}
@@ -4,7 +4,10 @@

import { removeSubtask } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
  enableSilentMode,
  disableSilentMode
} from '../../../../scripts/modules/utils.js';

/**
 * Remove a subtask from its parent task
@@ -18,78 +21,86 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules
 * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
 */
export async function removeSubtaskDirect(args, log) {
  try {
    // Enable silent mode to prevent console logs from interfering with JSON response
    enableSilentMode();

    log.info(`Removing subtask with args: ${JSON.stringify(args)}`);

    if (!args.id) {
      return {
        success: false,
        error: {
          code: 'INPUT_VALIDATION_ERROR',
          message:
            'Subtask ID is required and must be in format "parentId.subtaskId"'
        }
      };
    }

    // Validate subtask ID format
    if (!args.id.includes('.')) {
      return {
        success: false,
        error: {
          code: 'INPUT_VALIDATION_ERROR',
          message: `Invalid subtask ID format: ${args.id}. Expected format: "parentId.subtaskId"`
        }
      };
    }

    // Find the tasks.json path
    const tasksPath = findTasksJsonPath(args, log);

    // Convert convertToTask to a boolean
    const convertToTask = args.convert === true;

    // Determine if we should generate files
    const generateFiles = !args.skipGenerate;

    log.info(
      `Removing subtask ${args.id} (convertToTask: ${convertToTask}, generateFiles: ${generateFiles})`
    );

    const result = await removeSubtask(
      tasksPath,
      args.id,
      convertToTask,
      generateFiles
    );

    // Restore normal logging
    disableSilentMode();

    if (convertToTask && result) {
      // Return info about the converted task
      return {
        success: true,
        data: {
          message: `Subtask ${args.id} successfully converted to task #${result.id}`,
          task: result
        }
      };
    } else {
      // Return simple success message for deletion
      return {
        success: true,
        data: {
          message: `Subtask ${args.id} successfully removed`
        }
      };
    }
  } catch (error) {
    // Ensure silent mode is disabled even if an outer error occurs
    disableSilentMode();

    log.error(`Error in removeSubtaskDirect: ${error.message}`);
    return {
      success: false,
      error: {
        code: 'CORE_FUNCTION_ERROR',
        message: error.message
      }
    };
  }
}
@@ -4,7 +4,10 @@
 */

import { removeTask } from '../../../../scripts/modules/task-manager.js';
import {
  enableSilentMode,
  disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';

/**
@@ -15,90 +18,90 @@ import { findTasksJsonPath } from '../utils/path-utils.js';
 * @returns {Promise<Object>} - Remove task result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: false }
 */
export async function removeTaskDirect(args, log) {
  try {
    // Find the tasks path first
    let tasksPath;
    try {
      tasksPath = findTasksJsonPath(args, log);
    } catch (error) {
      log.error(`Tasks file not found: ${error.message}`);
      return {
        success: false,
        error: {
          code: 'FILE_NOT_FOUND_ERROR',
          message: error.message
        },
        fromCache: false
      };
    }

    // Validate task ID parameter
    const taskId = args.id;
    if (!taskId) {
      log.error('Task ID is required');
      return {
        success: false,
        error: {
          code: 'INPUT_VALIDATION_ERROR',
          message: 'Task ID is required'
        },
        fromCache: false
      };
    }

    // Skip confirmation in the direct function since it's handled by the client
    log.info(`Removing task with ID: ${taskId} from ${tasksPath}`);

    try {
      // Enable silent mode to prevent console logs from interfering with JSON response
      enableSilentMode();

      // Call the core removeTask function
      const result = await removeTask(tasksPath, taskId);

      // Restore normal logging
      disableSilentMode();

      log.info(`Successfully removed task: ${taskId}`);

      // Return the result
      return {
        success: true,
        data: {
          message: result.message,
          taskId: taskId,
          tasksPath: tasksPath,
          removedTask: result.removedTask
        },
        fromCache: false
      };
    } catch (error) {
      // Make sure to restore normal logging even if there's an error
      disableSilentMode();

      log.error(`Error removing task: ${error.message}`);
      return {
        success: false,
        error: {
          code: error.code || 'REMOVE_TASK_ERROR',
          message: error.message || 'Failed to remove task'
        },
        fromCache: false
      };
    }
  } catch (error) {
    // Ensure silent mode is disabled even if an outer error occurs
    disableSilentMode();

    // Catch any unexpected errors
    log.error(`Unexpected error in removeTaskDirect: ${error.message}`);
    return {
      success: false,
      error: {
        code: 'UNEXPECTED_ERROR',
        message: error.message
      },
      fromCache: false
    };
  }
}
@@ -5,108 +5,120 @@

import { setTaskStatus } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
	enableSilentMode,
	disableSilentMode,
	isSilentMode
} from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for setTaskStatus with error handling.
 *
 * @param {Object} args - Command arguments containing id, status and file path options.
 * @param {Object} log - Logger object.
 * @returns {Promise<Object>} - Result object with success status and data/error information.
 */
export async function setTaskStatusDirect(args, log) {
	try {
		log.info(`Setting task status with args: ${JSON.stringify(args)}`);

		// Check required parameters
		if (!args.id) {
			const errorMessage =
				'No task ID specified. Please provide a task ID to update.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_TASK_ID', message: errorMessage },
				fromCache: false
			};
		}

		if (!args.status) {
			const errorMessage =
				'No status specified. Please provide a new status value.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_STATUS', message: errorMessage },
				fromCache: false
			};
		}

		// Get tasks file path
		let tasksPath;
		try {
			// The enhanced findTasksJsonPath will now search in parent directories if needed
			tasksPath = findTasksJsonPath(args, log);
			log.info(`Found tasks file at: ${tasksPath}`);
		} catch (error) {
			log.error(`Error finding tasks file: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'TASKS_FILE_ERROR',
					message: `${error.message}\n\nPlease ensure you are in a Task Master project directory or use the --project-root parameter to specify the path to your project.`
				},
				fromCache: false
			};
		}

		// Execute core setTaskStatus function
		const taskId = args.id;
		const newStatus = args.status;

		log.info(`Setting task ${taskId} status to "${newStatus}"`);

		// Call the core function with proper silent mode handling
		let result;
		enableSilentMode(); // Enable silent mode before calling core function
		try {
			// Call the core function
			await setTaskStatus(tasksPath, taskId, newStatus, { mcpLog: log });

			log.info(`Successfully set task ${taskId} status to ${newStatus}`);

			// Return success data
			result = {
				success: true,
				data: {
					message: `Successfully updated task ${taskId} status to "${newStatus}"`,
					taskId,
					status: newStatus,
					tasksPath
				},
				fromCache: false // This operation always modifies state and should never be cached
			};
		} catch (error) {
			log.error(`Error setting task status: ${error.message}`);
			result = {
				success: false,
				error: {
					code: 'SET_STATUS_ERROR',
					message: error.message || 'Unknown error setting task status'
				},
				fromCache: false
			};
		} finally {
			// ALWAYS restore normal logging in finally block
			disableSilentMode();
		}

		return result;
	} catch (error) {
		// Ensure silent mode is disabled if there was an uncaught error in the outer try block
		if (isSilentMode()) {
			disableSilentMode();
		}

		log.error(`Error setting task status: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'SET_STATUS_ERROR',
				message: error.message || 'Unknown error setting task status'
			},
			fromCache: false
		};
	}
}
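For reference, a minimal calling sketch for setTaskStatusDirect as shown above. The import path, the console-backed logger, and the projectRoot argument are illustrative assumptions, not part of this diff:

	// usage sketch (hypothetical file living next to the wrapper)
	import { setTaskStatusDirect } from './set-task-status.js';

	const log = {
		info: (msg) => console.error(`[info] ${msg}`), // keep stdout free for JSON responses
		warn: (msg) => console.error(`[warn] ${msg}`),
		error: (msg) => console.error(`[error] ${msg}`),
		debug: (msg) => console.error(`[debug] ${msg}`)
	};

	const result = await setTaskStatusDirect(
		{ id: '5', status: 'done', projectRoot: process.cwd() },
		log
	);
	if (!result.success) {
		console.error(`${result.error.code}: ${result.error.message}`);
	}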
@@ -7,7 +7,10 @@ import { findTaskById } from '../../../../scripts/modules/utils.js';

import { readJSON } from '../../../../scripts/modules/utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for showing task details with error handling and caching.

@@ -17,120 +20,122 @@ import { enableSilentMode, disableSilentMode } from '../../../../scripts/modules

 * @returns {Promise<Object>} - Task details result { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
 */
export async function showTaskDirect(args, log) {
	let tasksPath;
	try {
		// Find the tasks path first - needed for cache key and execution
		tasksPath = findTasksJsonPath(args, log);
	} catch (error) {
		log.error(`Tasks file not found: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'FILE_NOT_FOUND_ERROR',
				message: error.message
			},
			fromCache: false
		};
	}

	// Validate task ID
	const taskId = args.id;
	if (!taskId) {
		log.error('Task ID is required');
		return {
			success: false,
			error: {
				code: 'INPUT_VALIDATION_ERROR',
				message: 'Task ID is required'
			},
			fromCache: false
		};
	}

	// Generate cache key using task path and ID
	const cacheKey = `showTask:${tasksPath}:${taskId}`;

	// Define the action function to be executed on cache miss
	const coreShowTaskAction = async () => {
		try {
			// Enable silent mode to prevent console logs from interfering with JSON response
			enableSilentMode();

			log.info(`Retrieving task details for ID: ${taskId} from ${tasksPath}`);

			// Read tasks data
			const data = readJSON(tasksPath);
			if (!data || !data.tasks) {
				return {
					success: false,
					error: {
						code: 'INVALID_TASKS_FILE',
						message: `No valid tasks found in ${tasksPath}`
					}
				};
			}

			// Find the specific task
			const task = findTaskById(data.tasks, taskId);

			if (!task) {
				return {
					success: false,
					error: {
						code: 'TASK_NOT_FOUND',
						message: `Task with ID ${taskId} not found`
					}
				};
			}

			// Restore normal logging
			disableSilentMode();

			// Return the task data with the full tasks array for reference
			// (needed for formatDependenciesWithStatus function in UI)
			log.info(`Successfully found task ${taskId}`);
			return {
				success: true,
				data: {
					task,
					allTasks: data.tasks
				}
			};
		} catch (error) {
			// Make sure to restore normal logging even if there's an error
			disableSilentMode();

			log.error(`Error showing task: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'CORE_FUNCTION_ERROR',
					message: error.message || 'Failed to show task details'
				}
			};
		}
	};

	// Use the caching utility
	try {
		const result = await getCachedOrExecute({
			cacheKey,
			actionFn: coreShowTaskAction,
			log
		});
		log.info(`showTaskDirect completed. From cache: ${result.fromCache}`);
		return result; // Returns { success, data/error, fromCache }
	} catch (error) {
		// Catch unexpected errors from getCachedOrExecute itself
		disableSilentMode();
		log.error(
			`Unexpected error during getCachedOrExecute for showTask: ${error.message}`
		);
		return {
			success: false,
			error: {
				code: 'UNEXPECTED_ERROR',
				message: error.message
			},
			fromCache: false
		};
	}
}
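showTaskDirect above delegates read caching to getCachedOrExecute. As a rough mental model only — a simplified sketch, not the actual implementation in ../../tools/utils.js — the utility behaves like a keyed in-memory cache wrapped around an async action:

	// Simplified, illustrative cache-or-execute helper.
	const cache = new Map();

	async function getCachedOrExecuteSketch({ cacheKey, actionFn, log }) {
		if (cache.has(cacheKey)) {
			log.info(`Cache hit for ${cacheKey}`);
			return { ...cache.get(cacheKey), fromCache: true };
		}
		const result = await actionFn();
		// Only cache successful results so failures are retried on the next call.
		if (result.success) {
			cache.set(cacheKey, result);
		}
		return { ...result, fromCache: false };
	}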
@@ -4,167 +4,190 @@
 */

import { updateSubtaskById } from '../../../../scripts/modules/task-manager.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
	getAnthropicClientForMCP,
	getPerplexityClientForMCP
} from '../utils/ai-client-utils.js';

/**
 * Direct function wrapper for updateSubtaskById with error handling.
 *
 * @param {Object} args - Command arguments containing id, prompt, useResearch and file path options.
 * @param {Object} log - Logger object.
 * @param {Object} context - Context object containing session data.
 * @returns {Promise<Object>} - Result object with success status and data/error information.
 */
export async function updateSubtaskByIdDirect(args, log, context = {}) {
	const { session } = context; // Only extract session, not reportProgress

	try {
		log.info(`Updating subtask with args: ${JSON.stringify(args)}`);

		// Check required parameters
		if (!args.id) {
			const errorMessage =
				'No subtask ID specified. Please provide a subtask ID to update.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_SUBTASK_ID', message: errorMessage },
				fromCache: false
			};
		}

		if (!args.prompt) {
			const errorMessage =
				'No prompt specified. Please provide a prompt with information to add to the subtask.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_PROMPT', message: errorMessage },
				fromCache: false
			};
		}

		// Validate subtask ID format
		const subtaskId = args.id;
		if (typeof subtaskId !== 'string' && typeof subtaskId !== 'number') {
			const errorMessage = `Invalid subtask ID type: ${typeof subtaskId}. Subtask ID must be a string or number.`;
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'INVALID_SUBTASK_ID_TYPE', message: errorMessage },
				fromCache: false
			};
		}

		const subtaskIdStr = String(subtaskId);
		if (!subtaskIdStr.includes('.')) {
			const errorMessage = `Invalid subtask ID format: ${subtaskIdStr}. Subtask ID must be in format "parentId.subtaskId" (e.g., "5.2").`;
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'INVALID_SUBTASK_ID_FORMAT', message: errorMessage },
				fromCache: false
			};
		}

		// Get tasks file path
		let tasksPath;
		try {
			tasksPath = findTasksJsonPath(args, log);
		} catch (error) {
			log.error(`Error finding tasks file: ${error.message}`);
			return {
				success: false,
				error: { code: 'TASKS_FILE_ERROR', message: error.message },
				fromCache: false
			};
		}

		// Get research flag
		const useResearch = args.research === true;

		log.info(
			`Updating subtask with ID ${subtaskIdStr} with prompt "${args.prompt}" and research: ${useResearch}`
		);

		// Initialize the appropriate AI client based on research flag
		try {
			if (useResearch) {
				// Initialize Perplexity client
				await getPerplexityClientForMCP(session);
			} else {
				// Initialize Anthropic client
				await getAnthropicClientForMCP(session);
			}
		} catch (error) {
			log.error(`AI client initialization error: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'AI_CLIENT_ERROR',
					message: error.message || 'Failed to initialize AI client'
				},
				fromCache: false
			};
		}

		try {
			// Enable silent mode to prevent console logs from interfering with JSON response
			enableSilentMode();

			// Create a logger wrapper object to handle logging without breaking the mcpLog[level] calls
			// This ensures outputFormat is set to 'json' while still supporting proper logging
			const logWrapper = {
				info: (message) => log.info(message),
				warn: (message) => log.warn(message),
				error: (message) => log.error(message),
				debug: (message) => log.debug && log.debug(message),
				success: (message) => log.info(message) // Map success to info if needed
			};

			// Execute core updateSubtaskById function
			// Pass both session and logWrapper as mcpLog to ensure outputFormat is 'json'
			const updatedSubtask = await updateSubtaskById(
				tasksPath,
				subtaskIdStr,
				args.prompt,
				useResearch,
				{
					session,
					mcpLog: logWrapper
				}
			);

			// Restore normal logging
			disableSilentMode();

			// Handle the case where the subtask couldn't be updated (e.g., already marked as done)
			if (!updatedSubtask) {
				return {
					success: false,
					error: {
						code: 'SUBTASK_UPDATE_FAILED',
						message:
							'Failed to update subtask. It may be marked as completed, or another error occurred.'
					},
					fromCache: false
				};
			}

			// Return the updated subtask information
			return {
				success: true,
				data: {
					message: `Successfully updated subtask with ID ${subtaskIdStr}`,
					subtaskId: subtaskIdStr,
					parentId: subtaskIdStr.split('.')[0],
					subtask: updatedSubtask,
					tasksPath,
					useResearch
				},
				fromCache: false // This operation always modifies state and should never be cached
			};
		} catch (error) {
			// Make sure to restore normal logging even if there's an error
			disableSilentMode();
			throw error; // Rethrow to be caught by outer catch block
		}
	} catch (error) {
		// Ensure silent mode is disabled
		disableSilentMode();

		log.error(`Error updating subtask by ID: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'UPDATE_SUBTASK_ERROR',
				message: error.message || 'Unknown error updating subtask'
			},
			fromCache: false
		};
	}
}
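The subtask ID checks above reduce to a small predicate; a standalone sketch, with the helper name invented for illustration:

	// "5.2" is a valid subtask ID; "5" or a non-scalar value is not.
	function isValidSubtaskId(id) {
		if (typeof id !== 'string' && typeof id !== 'number') return false;
		return String(id).includes('.');
	}

	console.log(isValidSubtaskId('5.2')); // true
	console.log(isValidSubtaskId(7)); // false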
@@ -5,168 +5,181 @@

import { updateTaskById } from '../../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';
import {
	getAnthropicClientForMCP,
	getPerplexityClientForMCP
} from '../utils/ai-client-utils.js';

/**
 * Direct function wrapper for updateTaskById with error handling.
 *
 * @param {Object} args - Command arguments containing id, prompt, useResearch and file path options.
 * @param {Object} log - Logger object.
 * @param {Object} context - Context object containing session data.
 * @returns {Promise<Object>} - Result object with success status and data/error information.
 */
export async function updateTaskByIdDirect(args, log, context = {}) {
	const { session } = context; // Only extract session, not reportProgress

	try {
		log.info(`Updating task with args: ${JSON.stringify(args)}`);

		// Check required parameters
		if (!args.id) {
			const errorMessage =
				'No task ID specified. Please provide a task ID to update.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_TASK_ID', message: errorMessage },
				fromCache: false
			};
		}

		if (!args.prompt) {
			const errorMessage =
				'No prompt specified. Please provide a prompt with new information for the task update.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_PROMPT', message: errorMessage },
				fromCache: false
			};
		}

		// Parse taskId - handle both string and number values
		let taskId;
		if (typeof args.id === 'string') {
			// Handle subtask IDs (e.g., "5.2")
			if (args.id.includes('.')) {
				taskId = args.id; // Keep as string for subtask IDs
			} else {
				// Parse as integer for main task IDs
				taskId = parseInt(args.id, 10);
				if (isNaN(taskId)) {
					const errorMessage = `Invalid task ID: ${args.id}. Task ID must be a positive integer or subtask ID (e.g., "5.2").`;
					log.error(errorMessage);
					return {
						success: false,
						error: { code: 'INVALID_TASK_ID', message: errorMessage },
						fromCache: false
					};
				}
			}
		} else {
			taskId = args.id;
		}

		// Get tasks file path
		let tasksPath;
		try {
			tasksPath = findTasksJsonPath(args, log);
		} catch (error) {
			log.error(`Error finding tasks file: ${error.message}`);
			return {
				success: false,
				error: { code: 'TASKS_FILE_ERROR', message: error.message },
				fromCache: false
			};
		}

		// Get research flag
		const useResearch = args.research === true;

		// Initialize appropriate AI client based on research flag
		let aiClient;
		try {
			if (useResearch) {
				log.info('Using Perplexity AI for research-backed task update');
				aiClient = await getPerplexityClientForMCP(session, log);
			} else {
				log.info('Using Claude AI for task update');
				aiClient = getAnthropicClientForMCP(session, log);
			}
		} catch (error) {
			log.error(`Failed to initialize AI client: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'AI_CLIENT_ERROR',
					message: `Cannot initialize AI client: ${error.message}`
				},
				fromCache: false
			};
		}

		log.info(
			`Updating task with ID ${taskId} with prompt "${args.prompt}" and research: ${useResearch}`
		);

		try {
			// Enable silent mode to prevent console logs from interfering with JSON response
			enableSilentMode();

			// Create a logger wrapper that matches what updateTaskById expects
			const logWrapper = {
				info: (message) => log.info(message),
				warn: (message) => log.warn(message),
				error: (message) => log.error(message),
				debug: (message) => log.debug && log.debug(message),
				success: (message) => log.info(message) // Map success to info since many loggers don't have success
			};

			// Execute core updateTaskById function with proper parameters
			await updateTaskById(
				tasksPath,
				taskId,
				args.prompt,
				useResearch,
				{
					mcpLog: logWrapper, // Use our wrapper object that has the expected method structure
					session
				},
				'json'
			);

			// Since updateTaskById doesn't return a value but modifies the tasks file,
			// we'll return a success message
			return {
				success: true,
				data: {
					message: `Successfully updated task with ID ${taskId} based on the prompt`,
					taskId,
					tasksPath,
					useResearch
				},
				fromCache: false // This operation always modifies state and should never be cached
			};
		} catch (error) {
			log.error(`Error updating task by ID: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'UPDATE_TASK_ERROR',
					message: error.message || 'Unknown error updating task'
				},
				fromCache: false
			};
		} finally {
			// Make sure to restore normal logging even if there's an error
			disableSilentMode();
		}
	} catch (error) {
		// Ensure silent mode is disabled
		disableSilentMode();

		log.error(`Error updating task by ID: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'UPDATE_TASK_ERROR',
				message: error.message || 'Unknown error updating task'
			},
			fromCache: false
		};
	}
}
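Both update wrappers above build the same logger adapter inline. A sketch of the shared shape they rely on; the factory name is hypothetical and not part of this changeset:

	// Adapt an MCP logger to the { info, warn, error, debug, success } shape expected as mcpLog.
	function createLogWrapper(log) {
		return {
			info: (message) => log.info(message),
			warn: (message) => log.warn(message),
			error: (message) => log.error(message),
			debug: (message) => log.debug && log.debug(message),
			success: (message) => log.info(message) // most loggers have no 'success' level
		};
	}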
@@ -4,168 +4,177 @@
 */

import { updateTasks } from '../../../../scripts/modules/task-manager.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
	getAnthropicClientForMCP,
	getPerplexityClientForMCP
} from '../utils/ai-client-utils.js';

/**
 * Direct function wrapper for updating tasks based on new context/prompt.
 *
 * @param {Object} args - Command arguments containing fromId, prompt, useResearch and file path options.
 * @param {Object} log - Logger object.
 * @param {Object} context - Context object containing session data.
 * @returns {Promise<Object>} - Result object with success status and data/error information.
 */
export async function updateTasksDirect(args, log, context = {}) {
	const { session } = context; // Only extract session, not reportProgress

	try {
		log.info(`Updating tasks with args: ${JSON.stringify(args)}`);

		// Check for the common mistake of using 'id' instead of 'from'
		if (args.id !== undefined && args.from === undefined) {
			const errorMessage =
				"You specified 'id' parameter but 'update' requires 'from' parameter. Use 'from' for this tool or use 'update_task' tool if you want to update a single task.";
			log.error(errorMessage);
			return {
				success: false,
				error: {
					code: 'PARAMETER_MISMATCH',
					message: errorMessage,
					suggestion:
						"Use 'from' parameter instead of 'id', or use the 'update_task' tool for single task updates"
				},
				fromCache: false
			};
		}

		// Check required parameters
		if (!args.from) {
			const errorMessage =
				'No from ID specified. Please provide a task ID to start updating from.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_FROM_ID', message: errorMessage },
				fromCache: false
			};
		}

		if (!args.prompt) {
			const errorMessage =
				'No prompt specified. Please provide a prompt with new context for task updates.';
			log.error(errorMessage);
			return {
				success: false,
				error: { code: 'MISSING_PROMPT', message: errorMessage },
				fromCache: false
			};
		}

		// Parse fromId - handle both string and number values
		let fromId;
		if (typeof args.from === 'string') {
			fromId = parseInt(args.from, 10);
			if (isNaN(fromId)) {
				const errorMessage = `Invalid from ID: ${args.from}. Task ID must be a positive integer.`;
				log.error(errorMessage);
				return {
					success: false,
					error: { code: 'INVALID_FROM_ID', message: errorMessage },
					fromCache: false
				};
			}
		} else {
			fromId = args.from;
		}

		// Get tasks file path
		let tasksPath;
		try {
			tasksPath = findTasksJsonPath(args, log);
		} catch (error) {
			log.error(`Error finding tasks file: ${error.message}`);
			return {
				success: false,
				error: { code: 'TASKS_FILE_ERROR', message: error.message },
				fromCache: false
			};
		}

		// Get research flag
		const useResearch = args.research === true;

		// Initialize appropriate AI client based on research flag
		let aiClient;
		try {
			if (useResearch) {
				log.info('Using Perplexity AI for research-backed task updates');
				aiClient = await getPerplexityClientForMCP(session, log);
			} else {
				log.info('Using Claude AI for task updates');
				aiClient = getAnthropicClientForMCP(session, log);
			}
		} catch (error) {
			log.error(`Failed to initialize AI client: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'AI_CLIENT_ERROR',
					message: `Cannot initialize AI client: ${error.message}`
				},
				fromCache: false
			};
		}

		log.info(
			`Updating tasks from ID ${fromId} with prompt "${args.prompt}" and research: ${useResearch}`
		);

		try {
			// Enable silent mode to prevent console logs from interfering with JSON response
			enableSilentMode();

			// Execute core updateTasks function, passing the AI client and session
			await updateTasks(tasksPath, fromId, args.prompt, useResearch, {
				mcpLog: log,
				session
			});

			// Since updateTasks doesn't return a value but modifies the tasks file,
			// we'll return a success message
			return {
				success: true,
				data: {
					message: `Successfully updated tasks from ID ${fromId} based on the prompt`,
					fromId,
					tasksPath,
					useResearch
				},
				fromCache: false // This operation always modifies state and should never be cached
			};
		} catch (error) {
			log.error(`Error updating tasks: ${error.message}`);
			return {
				success: false,
				error: {
					code: 'UPDATE_TASKS_ERROR',
					message: error.message || 'Unknown error updating tasks'
				},
				fromCache: false
			};
		} finally {
			// Make sure to restore normal logging even if there's an error
			disableSilentMode();
		}
	} catch (error) {
		// Ensure silent mode is disabled
		disableSilentMode();

		log.error(`Error updating tasks: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'UPDATE_TASKS_ERROR',
				message: error.message || 'Unknown error updating tasks'
			},
			fromCache: false
		};
	}
}
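A minimal calling sketch for updateTasksDirect, showing the 'from' parameter it insists on; the import path and the logger object are assumptions for illustration only:

	// Hypothetical call: rewrite every task from ID 4 onward with new context.
	import { updateTasksDirect } from './update-tasks.js';

	const result = await updateTasksDirect(
		{ from: 4, prompt: 'We switched the backend from Express to Fastify.', research: false },
		{
			info: (m) => console.error(m),
			warn: (m) => console.error(m),
			error: (m) => console.error(m),
			debug: (m) => console.error(m)
		},
		{ session: { env: process.env } }
	);
	console.log(result.success ? result.data.message : result.error.message);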
@@ -4,7 +4,10 @@

import { validateDependenciesCommand } from '../../../../scripts/modules/dependency-manager.js';
import { findTasksJsonPath } from '../utils/path-utils.js';
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';
import fs from 'fs';

/**

@@ -16,50 +19,50 @@ import fs from 'fs';

 * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
 */
export async function validateDependenciesDirect(args, log) {
	try {
		log.info(`Validating dependencies in tasks...`);

		// Find the tasks.json path
		const tasksPath = findTasksJsonPath(args, log);

		// Verify the file exists
		if (!fs.existsSync(tasksPath)) {
			return {
				success: false,
				error: {
					code: 'FILE_NOT_FOUND',
					message: `Tasks file not found at ${tasksPath}`
				}
			};
		}

		// Enable silent mode to prevent console logs from interfering with JSON response
		enableSilentMode();

		// Call the original command function
		await validateDependenciesCommand(tasksPath);

		// Restore normal logging
		disableSilentMode();

		return {
			success: true,
			data: {
				message: 'Dependencies validated successfully',
				tasksPath
			}
		};
	} catch (error) {
		// Make sure to restore normal logging even if there's an error
		disableSilentMode();

		log.error(`Error validating dependencies: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'VALIDATION_ERROR',
				message: error.message
			}
		};
	}
}
@@ -33,64 +33,64 @@ import { removeTaskDirect } from './direct-functions/remove-task.js';

export { findTasksJsonPath } from './utils/path-utils.js';

// Re-export AI client utilities
export {
	getAnthropicClientForMCP,
	getPerplexityClientForMCP,
	getModelConfig,
	getBestAvailableAIModel,
	handleClaudeError
} from './utils/ai-client-utils.js';

// Use Map for potential future enhancements like introspection or dynamic dispatch
export const directFunctions = new Map([
	['listTasksDirect', listTasksDirect],
	['getCacheStatsDirect', getCacheStatsDirect],
	['parsePRDDirect', parsePRDDirect],
	['updateTasksDirect', updateTasksDirect],
	['updateTaskByIdDirect', updateTaskByIdDirect],
	['updateSubtaskByIdDirect', updateSubtaskByIdDirect],
	['generateTaskFilesDirect', generateTaskFilesDirect],
	['setTaskStatusDirect', setTaskStatusDirect],
	['showTaskDirect', showTaskDirect],
	['nextTaskDirect', nextTaskDirect],
	['expandTaskDirect', expandTaskDirect],
	['addTaskDirect', addTaskDirect],
	['addSubtaskDirect', addSubtaskDirect],
	['removeSubtaskDirect', removeSubtaskDirect],
	['analyzeTaskComplexityDirect', analyzeTaskComplexityDirect],
	['clearSubtasksDirect', clearSubtasksDirect],
	['expandAllTasksDirect', expandAllTasksDirect],
	['removeDependencyDirect', removeDependencyDirect],
	['validateDependenciesDirect', validateDependenciesDirect],
	['fixDependenciesDirect', fixDependenciesDirect],
	['complexityReportDirect', complexityReportDirect],
	['addDependencyDirect', addDependencyDirect],
	['removeTaskDirect', removeTaskDirect]
]);

// Re-export all direct function implementations
export {
	listTasksDirect,
	getCacheStatsDirect,
	parsePRDDirect,
	updateTasksDirect,
	updateTaskByIdDirect,
	updateSubtaskByIdDirect,
	generateTaskFilesDirect,
	setTaskStatusDirect,
	showTaskDirect,
	nextTaskDirect,
	expandTaskDirect,
	addTaskDirect,
	addSubtaskDirect,
	removeSubtaskDirect,
	analyzeTaskComplexityDirect,
	clearSubtasksDirect,
	expandAllTasksDirect,
	removeDependencyDirect,
	validateDependenciesDirect,
	fixDependenciesDirect,
	complexityReportDirect,
	addDependencyDirect,
	removeTaskDirect
};
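One way the directFunctions Map above can be used for dispatch by name — a hedged sketch; the actual MCP tool layer may wire this differently:

	// Illustrative dispatcher over the exported Map.
	async function runDirectFunction(name, args, log, context = {}) {
		const fn = directFunctions.get(name);
		if (!fn) {
			return {
				success: false,
				error: { code: 'UNKNOWN_FUNCTION', message: `No direct function named ${name}` },
				fromCache: false
			};
		}
		return fn(args, log, context);
	}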
@@ -11,9 +11,9 @@ dotenv.config();

// Default model configuration from CLI environment
const DEFAULT_MODEL_CONFIG = {
	model: 'claude-3-7-sonnet-20250219',
	maxTokens: 64000,
	temperature: 0.2
};

/**

@@ -24,25 +24,28 @@ const DEFAULT_MODEL_CONFIG = {

 * @throws {Error} If API key is missing
 */
export function getAnthropicClientForMCP(session, log = console) {
	try {
		// Extract API key from session.env or fall back to environment variables
		const apiKey =
			session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY;

		if (!apiKey) {
			throw new Error(
				'ANTHROPIC_API_KEY not found in session environment or process.env'
			);
		}

		// Initialize and return a new Anthropic client
		return new Anthropic({
			apiKey,
			defaultHeaders: {
				'anthropic-beta': 'output-128k-2025-02-19' // Include header for increased token limit
			}
		});
	} catch (error) {
		log.error(`Failed to initialize Anthropic client: ${error.message}`);
		throw error;
	}
}

/**
@@ -53,26 +56,29 @@ export function getAnthropicClientForMCP(session, log = console) {
 * @throws {Error} If API key is missing or OpenAI package can't be imported
 */
export async function getPerplexityClientForMCP(session, log = console) {
	try {
		// Extract API key from session.env or fall back to environment variables
		const apiKey =
			session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY;

		if (!apiKey) {
			throw new Error(
				'PERPLEXITY_API_KEY not found in session environment or process.env'
			);
		}

		// Dynamically import OpenAI (it may not be used in all contexts)
		const { default: OpenAI } = await import('openai');

		// Initialize and return a new OpenAI client configured for Perplexity
		return new OpenAI({
			apiKey,
			baseURL: 'https://api.perplexity.ai'
		});
	} catch (error) {
		log.error(`Failed to initialize Perplexity client: ${error.message}`);
		throw error;
	}
}

/**
@@ -82,12 +88,12 @@ export async function getPerplexityClientForMCP(session, log = console) {
 * @returns {Object} Model configuration with model, maxTokens, and temperature
 */
export function getModelConfig(session, defaults = DEFAULT_MODEL_CONFIG) {
	// Get values from session or fall back to defaults
	return {
		model: session?.env?.MODEL || defaults.model,
		maxTokens: parseInt(session?.env?.MAX_TOKENS || defaults.maxTokens),
		temperature: parseFloat(session?.env?.TEMPERATURE || defaults.temperature)
	};
}

/**
@@ -100,59 +106,78 @@ export function getModelConfig(session, defaults = DEFAULT_MODEL_CONFIG) {
 * @returns {Promise<Object>} Selected model info with type and client
 * @throws {Error} If no AI models are available
 */
export async function getBestAvailableAIModel(
	session,
	options = {},
	log = console
) {
	const { requiresResearch = false, claudeOverloaded = false } = options;

	// Test case: When research is needed but no Perplexity, use Claude
	if (
		requiresResearch &&
		!(session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY) &&
		(session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)
	) {
		try {
			log.warn('Perplexity not available for research, using Claude');
			const client = getAnthropicClientForMCP(session, log);
			return { type: 'claude', client };
		} catch (error) {
			log.error(`Claude not available: ${error.message}`);
			throw new Error('No AI models available for research');
		}
	}

	// Regular path: Perplexity for research when available
	if (
		requiresResearch &&
		(session?.env?.PERPLEXITY_API_KEY || process.env.PERPLEXITY_API_KEY)
	) {
		try {
			const client = await getPerplexityClientForMCP(session, log);
			return { type: 'perplexity', client };
		} catch (error) {
			log.warn(`Perplexity not available: ${error.message}`);
			// Fall through to Claude as backup
		}
	}

	// Test case: Claude for overloaded scenario
	if (
		claudeOverloaded &&
		(session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)
	) {
		try {
			log.warn(
				'Claude is overloaded but no alternatives are available. Proceeding with Claude anyway.'
			);
			const client = getAnthropicClientForMCP(session, log);
			return { type: 'claude', client };
		} catch (error) {
			log.error(
				`Claude not available despite being overloaded: ${error.message}`
			);
			throw new Error('No AI models available');
		}
	}

	// Default case: Use Claude when available and not overloaded
	if (
		!claudeOverloaded &&
		(session?.env?.ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY)
	) {
		try {
			const client = getAnthropicClientForMCP(session, log);
			return { type: 'claude', client };
		} catch (error) {
			log.warn(`Claude not available: ${error.message}`);
			// Fall through to error if no other options
		}
	}

	// If we got here, no models were successfully initialized
	throw new Error('No AI models available. Please check your API keys.');
}

/**
@@ -161,28 +186,28 @@ export async function getBestAvailableAIModel(session, options = {}, log = console) {
 * @returns {string} User-friendly error message
 */
export function handleClaudeError(error) {
	// Check if it's a structured error response
	if (error.type === 'error' && error.error) {
		switch (error.error.type) {
			case 'overloaded_error':
				return 'Claude is currently experiencing high demand and is overloaded. Please wait a few minutes and try again.';
			case 'rate_limit_error':
				return 'You have exceeded the rate limit. Please wait a few minutes before making more requests.';
			case 'invalid_request_error':
				return 'There was an issue with the request format. If this persists, please report it as a bug.';
			default:
				return `Claude API error: ${error.error.message}`;
		}
	}

	// Check for network/timeout errors
	if (error.message?.toLowerCase().includes('timeout')) {
		return 'The request to Claude timed out. Please try again.';
	}
	if (error.message?.toLowerCase().includes('network')) {
		return 'There was a network error connecting to Claude. Please check your internet connection and try again.';
	}

	// Default error message
	return `Error communicating with Claude: ${error.message}`;
}
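For orientation, a minimal usage sketch of the helpers in this hunk (not part of the changeset); the wrapper function name and the import path are assumptions.

import {
	getBestAvailableAIModel,
	getModelConfig,
	handleClaudeError
} from './ai-client-utils.js'; // import path assumed

async function pickModelForRequest(session, log) {
	const { model, maxTokens, temperature } = getModelConfig(session);
	try {
		const { type, client } = await getBestAvailableAIModel(
			session,
			{ requiresResearch: false },
			log
		);
		log.info(`Using ${type} (${model}, maxTokens=${maxTokens}, temp=${temperature})`);
		return { type, client };
	} catch (error) {
		// Convert raw API errors into the user-facing messages defined above
		log.error(handleClaudeError(error));
		throw error;
	}
}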
@@ -1,213 +1,247 @@
import { v4 as uuidv4 } from 'uuid';

class AsyncOperationManager {
	constructor() {
		this.operations = new Map(); // Stores active operation state
		this.completedOperations = new Map(); // Stores completed operations
		this.maxCompletedOperations = 100; // Maximum number of completed operations to store
		this.listeners = new Map(); // For potential future notifications
	}

	/**
	 * Adds an operation to be executed asynchronously.
	 * @param {Function} operationFn - The async function to execute (e.g., a Direct function).
	 * @param {Object} args - Arguments to pass to the operationFn.
	 * @param {Object} context - The MCP tool context { log, reportProgress, session }.
	 * @returns {string} The unique ID assigned to this operation.
	 */
	addOperation(operationFn, args, context) {
		const operationId = `op-${uuidv4()}`;
		const operation = {
			id: operationId,
			status: 'pending',
			startTime: Date.now(),
			endTime: null,
			result: null,
			error: null,
			// Store necessary parts of context, especially log for background execution
			log: context.log,
			reportProgress: context.reportProgress, // Pass reportProgress through
			session: context.session // Pass session through if needed by the operationFn
		};
		this.operations.set(operationId, operation);
		this.log(operationId, 'info', `Operation added.`);

		// Start execution in the background (don't await here)
		this._runOperation(operationId, operationFn, args, context).catch((err) => {
			// Catch unexpected errors during the async execution setup itself
			this.log(
				operationId,
				'error',
				`Critical error starting operation: ${err.message}`,
				{ stack: err.stack }
			);
			operation.status = 'failed';
			operation.error = {
				code: 'MANAGER_EXECUTION_ERROR',
				message: err.message
			};
			operation.endTime = Date.now();

			// Move to completed operations
			this._moveToCompleted(operationId);
		});

		return operationId;
	}

	/**
	 * Internal function to execute the operation.
	 * @param {string} operationId - The ID of the operation.
	 * @param {Function} operationFn - The async function to execute.
	 * @param {Object} args - Arguments for the function.
	 * @param {Object} context - The original MCP tool context.
	 */
	async _runOperation(operationId, operationFn, args, context) {
		const operation = this.operations.get(operationId);
		if (!operation) return; // Should not happen

		operation.status = 'running';
		this.log(operationId, 'info', `Operation running.`);
		this.emit('statusChanged', { operationId, status: 'running' });

		try {
			// Pass the necessary context parts to the direct function
			// The direct function needs to be adapted if it needs reportProgress
			// We pass the original context's log, plus our wrapped reportProgress
			const result = await operationFn(args, operation.log, {
				reportProgress: (progress) =>
					this._handleProgress(operationId, progress),
				mcpLog: operation.log, // Pass log as mcpLog if direct fn expects it
				session: operation.session
			});

			operation.status = result.success ? 'completed' : 'failed';
			operation.result = result.success ? result.data : null;
			operation.error = result.success ? null : result.error;
			this.log(
				operationId,
				'info',
				`Operation finished with status: ${operation.status}`
			);
		} catch (error) {
			this.log(
				operationId,
				'error',
				`Operation failed with error: ${error.message}`,
				{ stack: error.stack }
			);
			operation.status = 'failed';
			operation.error = {
				code: 'OPERATION_EXECUTION_ERROR',
				message: error.message
			};
		} finally {
			operation.endTime = Date.now();
			this.emit('statusChanged', {
				operationId,
				status: operation.status,
				result: operation.result,
				error: operation.error
			});

			// Move to completed operations if done or failed
			if (operation.status === 'completed' || operation.status === 'failed') {
				this._moveToCompleted(operationId);
			}
		}
	}

	/**
	 * Move an operation from active operations to completed operations history.
	 * @param {string} operationId - The ID of the operation to move.
	 * @private
	 */
	_moveToCompleted(operationId) {
		const operation = this.operations.get(operationId);
		if (!operation) return;

		// Store only the necessary data in completed operations
		const completedData = {
			id: operation.id,
			status: operation.status,
			startTime: operation.startTime,
			endTime: operation.endTime,
			result: operation.result,
			error: operation.error
		};

		this.completedOperations.set(operationId, completedData);
		this.operations.delete(operationId);

		// Trim completed operations if exceeding maximum
		if (this.completedOperations.size > this.maxCompletedOperations) {
			// Get the oldest operation (sorted by endTime)
			const oldest = [...this.completedOperations.entries()].sort(
				(a, b) => a[1].endTime - b[1].endTime
			)[0];

			if (oldest) {
				this.completedOperations.delete(oldest[0]);
			}
		}
	}

	/**
	 * Handles progress updates from the running operation and forwards them.
	 * @param {string} operationId - The ID of the operation reporting progress.
	 * @param {Object} progress - The progress object { progress, total? }.
	 */
	_handleProgress(operationId, progress) {
		const operation = this.operations.get(operationId);
		if (operation && operation.reportProgress) {
			try {
				// Use the reportProgress function captured from the original context
				operation.reportProgress(progress);
				this.log(
					operationId,
					'debug',
					`Reported progress: ${JSON.stringify(progress)}`
				);
			} catch (err) {
				this.log(
					operationId,
					'warn',
					`Failed to report progress: ${err.message}`
				);
				// Don't stop the operation, just log the reporting failure
			}
		}
	}

	/**
	 * Retrieves the status and result/error of an operation.
	 * @param {string} operationId - The ID of the operation.
	 * @returns {Object | null} The operation details or null if not found.
	 */
	getStatus(operationId) {
		// First check active operations
		const operation = this.operations.get(operationId);
		if (operation) {
			return {
				id: operation.id,
				status: operation.status,
				startTime: operation.startTime,
				endTime: operation.endTime,
				result: operation.result,
				error: operation.error
			};
		}

		// Then check completed operations
		const completedOperation = this.completedOperations.get(operationId);
		if (completedOperation) {
			return completedOperation;
		}

		// Operation not found in either active or completed
		return {
			error: {
				code: 'OPERATION_NOT_FOUND',
				message: `Operation ID ${operationId} not found. It may have been completed and removed from history, or the ID may be invalid.`
			},
			status: 'not_found'
		};
	}

	/**
	 * Internal logging helper to prefix logs with the operation ID.
	 * @param {string} operationId - The ID of the operation.
	 * @param {'info'|'warn'|'error'|'debug'} level - Log level.
	 * @param {string} message - Log message.
	 * @param {Object} [meta] - Additional metadata.
	 */
	log(operationId, level, message, meta = {}) {
		const operation = this.operations.get(operationId);
		// Use the logger instance associated with the operation if available, otherwise console
		const logger = operation?.log || console;
		const logFn = logger[level] || logger.log || console.log; // Fallback
		logFn(`[AsyncOp ${operationId}] ${message}`, meta);
	}

	// --- Basic Event Emitter ---
	on(eventName, listener) {
		if (!this.listeners.has(eventName)) {
			this.listeners.set(eventName, []);
		}
		this.listeners.get(eventName).push(listener);
	}

	emit(eventName, data) {
		if (this.listeners.has(eventName)) {
			this.listeners.get(eventName).forEach((listener) => listener(data));
		}
	}
}

// Export a singleton instance
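A rough sketch (not part of the diff) of how the manager above is meant to be driven; the demo direct function and the import path are placeholders, while addOperation and getStatus are the methods defined in this hunk.

import { asyncOperationManager } from './core/utils/async-manager.js';

// A stand-in direct function; real callers pass one of the *Direct functions.
async function demoDirect(args, log) {
	log.info(`Pretending to expand task ${args.id}`);
	return { success: true, data: { expanded: args.id } };
}

const operationId = asyncOperationManager.addOperation(
	demoDirect,
	{ id: '5' },
	{ log: console, reportProgress: () => {}, session: undefined }
);

// Later (or from another tool call), check how it went:
// { id, status: 'pending' | 'running' | 'completed' | 'failed', result, error, ... }
console.log(asyncOperationManager.getStatus(operationId));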
@@ -6,38 +6,42 @@
 * @returns {Promise<any>} The result of the actionFn.
 */
export async function withSessionEnv(sessionEnv, actionFn) {
	if (
		!sessionEnv ||
		typeof sessionEnv !== 'object' ||
		Object.keys(sessionEnv).length === 0
	) {
		// If no sessionEnv is provided, just run the action directly
		return await actionFn();
	}

	const originalEnv = {};
	const keysToRestore = [];

	// Set environment variables from sessionEnv
	for (const key in sessionEnv) {
		if (Object.prototype.hasOwnProperty.call(sessionEnv, key)) {
			// Store original value if it exists, otherwise mark for deletion
			if (process.env[key] !== undefined) {
				originalEnv[key] = process.env[key];
			}
			keysToRestore.push(key);
			process.env[key] = sessionEnv[key];
		}
	}

	try {
		// Execute the provided action function
		return await actionFn();
	} finally {
		// Restore original environment variables
		for (const key of keysToRestore) {
			if (Object.prototype.hasOwnProperty.call(originalEnv, key)) {
				process.env[key] = originalEnv[key];
			} else {
				// If the key didn't exist originally, delete it
				delete process.env[key];
			}
		}
	}
}
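A small illustrative call (not in the changeset), assuming the module's import path; it shows the overlay-then-restore behavior of withSessionEnv.

import { withSessionEnv } from './env-utils.js'; // import path assumed

const result = await withSessionEnv(
	{ ANTHROPIC_API_KEY: 'key-from-session', TEMPERATURE: '0.5' },
	async () => {
		// Code here sees the overlaid values...
		return process.env.TEMPERATURE; // '0.5'
	}
);
// ...and afterwards process.env is back to its original state.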
@@ -1,9 +1,9 @@
/**
 * path-utils.js
 * Utility functions for file path operations in Task Master
 *
 * This module provides robust path resolution for both:
 * 1. PACKAGE PATH: Where task-master code is installed
 *    (global node_modules OR local ./node_modules/task-master OR direct from repo)
 * 2. PROJECT PATH: Where user's tasks.json resides (typically user's project root)
 */
@@ -18,43 +18,43 @@ export let lastFoundProjectRoot = null;

// Project marker files that indicate a potential project root
export const PROJECT_MARKERS = [
	// Task Master specific
	'tasks.json',
	'tasks/tasks.json',

	// Common version control
	'.git',
	'.svn',

	// Common package files
	'package.json',
	'pyproject.toml',
	'Gemfile',
	'go.mod',
	'Cargo.toml',

	// Common IDE/editor folders
	'.cursor',
	'.vscode',
	'.idea',

	// Common dependency directories (check if directory)
	'node_modules',
	'venv',
	'.venv',

	// Common config files
	'.env',
	'.eslintrc',
	'tsconfig.json',
	'babel.config.js',
	'jest.config.js',
	'webpack.config.js',

	// Common CI/CD files
	'.github/workflows',
	'.gitlab-ci.yml',
	'.circleci/config.yml'
];

/**
@@ -63,15 +63,15 @@ export const PROJECT_MARKERS = [
 * @returns {string} - Absolute path to the package installation directory
 */
export function getPackagePath() {
	// When running from source, __dirname is the directory containing this file
	// When running from npm, we need to find the package root
	const thisFilePath = fileURLToPath(import.meta.url);
	const thisFileDir = path.dirname(thisFilePath);

	// Navigate from core/utils up to the package root
	// In dev: /path/to/task-master/mcp-server/src/core/utils -> /path/to/task-master
	// In npm: /path/to/node_modules/task-master/mcp-server/src/core/utils -> /path/to/node_modules/task-master
	return path.resolve(thisFileDir, '../../../../');
}

/**
@@ -82,62 +82,73 @@ export function getPackagePath() {
 * @throws {Error} - If tasks.json cannot be found.
 */
export function findTasksJsonPath(args, log) {
	// PRECEDENCE ORDER for finding tasks.json:
	// 1. Explicitly provided `projectRoot` in args (Highest priority, expected in MCP context)
	// 2. Previously found/cached `lastFoundProjectRoot` (primarily for CLI performance)
	// 3. Search upwards from current working directory (`process.cwd()`) - CLI usage

	// 1. If project root is explicitly provided (e.g., from MCP session), use it directly
	if (args.projectRoot) {
		const projectRoot = args.projectRoot;
		log.info(`Using explicitly provided project root: ${projectRoot}`);
		try {
			// This will throw if tasks.json isn't found within this root
			return findTasksJsonInDirectory(projectRoot, args.file, log);
		} catch (error) {
			// Include debug info in error
			const debugInfo = {
				projectRoot,
				currentDir: process.cwd(),
				serverDir: path.dirname(process.argv[1]),
				possibleProjectRoot: path.resolve(
					path.dirname(process.argv[1]),
					'../..'
				),
				lastFoundProjectRoot,
				searchedPaths: error.message
			};

			error.message = `Tasks file not found in any of the expected locations relative to project root "${projectRoot}" (from session).\nDebug Info: ${JSON.stringify(debugInfo, null, 2)}`;
			throw error;
		}
	}

	// --- Fallback logic primarily for CLI or when projectRoot isn't passed ---

	// 2. If we have a last known project root that worked, try it first
	if (lastFoundProjectRoot) {
		log.info(`Trying last known project root: ${lastFoundProjectRoot}`);
		try {
			// Use the cached root
			const tasksPath = findTasksJsonInDirectory(
				lastFoundProjectRoot,
				args.file,
				log
			);
			return tasksPath; // Return if found in cached root
		} catch (error) {
			log.info(
				`Task file not found in last known project root, continuing search.`
			);
			// Continue with search if not found in cache
		}
	}

	// 3. Start search from current directory (most common CLI scenario)
	const startDir = process.cwd();
	log.info(
		`Searching for tasks.json starting from current directory: ${startDir}`
	);

	// Try to find tasks.json by walking up the directory tree from cwd
	try {
		// This will throw if not found in the CWD tree
		return findTasksJsonWithParentSearch(startDir, args.file, log);
	} catch (error) {
		// If all attempts fail, augment and throw the original error from CWD search
		error.message = `${error.message}\n\nPossible solutions:\n1. Run the command from your project directory containing tasks.json\n2. Use --project-root=/path/to/project to specify the project location (if using CLI)\n3. Ensure the project root is correctly passed from the client (if using MCP)\n\nCurrent working directory: ${startDir}\nLast known project root: ${lastFoundProjectRoot}\nProject root from args: ${args.projectRoot}`;
		throw error;
	}
}

/**
@@ -146,11 +157,11 @@ export function findTasksJsonPath(args, log) {
 * @returns {boolean} - True if the directory contains any project markers
 */
function hasProjectMarkers(dirPath) {
	return PROJECT_MARKERS.some((marker) => {
		const markerPath = path.join(dirPath, marker);
		// Check if the marker exists as either a file or directory
		return fs.existsSync(markerPath);
	});
}

/**
@@ -162,39 +173,41 @@ function hasProjectMarkers(dirPath) {
 * @throws {Error} - If tasks.json cannot be found
 */
function findTasksJsonInDirectory(dirPath, explicitFilePath, log) {
	const possiblePaths = [];

	// 1. If a file is explicitly provided relative to dirPath
	if (explicitFilePath) {
		possiblePaths.push(path.resolve(dirPath, explicitFilePath));
	}

	// 2. Check the standard locations relative to dirPath
	possiblePaths.push(
		path.join(dirPath, 'tasks.json'),
		path.join(dirPath, 'tasks', 'tasks.json')
	);

	log.info(`Checking potential task file paths: ${possiblePaths.join(', ')}`);

	// Find the first existing path
	for (const p of possiblePaths) {
		log.info(`Checking if exists: ${p}`);
		const exists = fs.existsSync(p);
		log.info(`Path ${p} exists: ${exists}`);

		if (exists) {
			log.info(`Found tasks file at: ${p}`);
			// Store the project root for future use
			lastFoundProjectRoot = dirPath;
			return p;
		}
	}

	// If no file was found, throw an error
	const error = new Error(
		`Tasks file not found in any of the expected locations relative to ${dirPath}: ${possiblePaths.join(', ')}`
	);
	error.code = 'TASKS_FILE_NOT_FOUND';
	throw error;
}

/**
@@ -207,66 +220,74 @@ function findTasksJsonInDirectory(dirPath, explicitFilePath, log) {
 * @throws {Error} - If tasks.json cannot be found in any parent directory
 */
function findTasksJsonWithParentSearch(startDir, explicitFilePath, log) {
	let currentDir = startDir;
	const rootDir = path.parse(currentDir).root;

	// Keep traversing up until we hit the root directory
	while (currentDir !== rootDir) {
		// First check for tasks.json directly
		try {
			return findTasksJsonInDirectory(currentDir, explicitFilePath, log);
		} catch (error) {
			// If tasks.json not found but the directory has project markers,
			// log it as a potential project root (helpful for debugging)
			if (hasProjectMarkers(currentDir)) {
				log.info(`Found project markers in ${currentDir}, but no tasks.json`);
			}

			// Move up to parent directory
			const parentDir = path.dirname(currentDir);

			// Check if we've reached the root
			if (parentDir === currentDir) {
				break;
			}

			log.info(
				`Tasks file not found in ${currentDir}, searching in parent directory: ${parentDir}`
			);
			currentDir = parentDir;
		}
	}

	// If we've searched all the way to the root and found nothing
	const error = new Error(
		`Tasks file not found in ${startDir} or any parent directory.`
	);
	error.code = 'TASKS_FILE_NOT_FOUND';
	throw error;
}

// Note: findTasksWithNpmConsideration is not used by findTasksJsonPath and might be legacy or used elsewhere.
// If confirmed unused, it could potentially be removed in a separate cleanup.
function findTasksWithNpmConsideration(startDir, log) {
	// First try our recursive parent search from cwd
	try {
		return findTasksJsonWithParentSearch(startDir, null, log);
	} catch (error) {
		// If that fails, try looking relative to the executable location
		const execPath = process.argv[1];
		const execDir = path.dirname(execPath);
		log.info(`Looking for tasks file relative to executable at: ${execDir}`);

		try {
			return findTasksJsonWithParentSearch(execDir, null, log);
		} catch (secondError) {
			// If that also fails, check standard locations in user's home directory
			const homeDir = os.homedir();
			log.info(`Looking for tasks file in home directory: ${homeDir}`);

			try {
				// Check standard locations in home dir
				return findTasksJsonInDirectory(
					path.join(homeDir, '.task-master'),
					null,
					log
				);
			} catch (thirdError) {
				// If all approaches fail, throw the original error
				throw error;
			}
		}
	}
}
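An illustrative call pattern (not part of the diff), with an assumed import path, showing the two resolution modes of findTasksJsonPath described in the precedence comments above.

import { findTasksJsonPath } from './path-utils.js'; // import path assumed

// MCP context: an explicit projectRoot wins, so no directory walking is needed.
const fromMcp = findTasksJsonPath(
	{ projectRoot: '/home/user/my-project', file: undefined },
	console
);

// CLI context: no projectRoot, so the search falls back to the cached root
// and then walks up from process.cwd() looking for tasks.json or tasks/tasks.json.
const fromCli = findTasksJsonPath({}, console);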
@@ -1,10 +1,10 @@
import { FastMCP } from 'fastmcp';
import path from 'path';
import dotenv from 'dotenv';
import { fileURLToPath } from 'url';
import fs from 'fs';
import logger from './logger.js';
import { registerTaskMasterTools } from './tools/index.js';
import { asyncOperationManager } from './core/utils/async-manager.js';

// Load environment variables
@@ -18,74 +18,74 @@ const __dirname = path.dirname(__filename);
|
|||||||
* Main MCP server class that integrates with Task Master
|
* Main MCP server class that integrates with Task Master
|
||||||
*/
|
*/
|
||||||
class TaskMasterMCPServer {
|
class TaskMasterMCPServer {
|
||||||
constructor() {
|
constructor() {
|
||||||
// Get version from package.json using synchronous fs
|
// Get version from package.json using synchronous fs
|
||||||
const packagePath = path.join(__dirname, "../../package.json");
|
const packagePath = path.join(__dirname, '../../package.json');
|
||||||
const packageJson = JSON.parse(fs.readFileSync(packagePath, "utf8"));
|
const packageJson = JSON.parse(fs.readFileSync(packagePath, 'utf8'));
|
||||||
|
|
||||||
this.options = {
|
this.options = {
|
||||||
name: "Task Master MCP Server",
|
name: 'Task Master MCP Server',
|
||||||
version: packageJson.version,
|
version: packageJson.version
|
||||||
};
|
};
|
||||||
|
|
||||||
this.server = new FastMCP(this.options);
|
this.server = new FastMCP(this.options);
|
||||||
this.initialized = false;
|
this.initialized = false;
|
||||||
|
|
||||||
this.server.addResource({});
|
this.server.addResource({});
|
||||||
|
|
||||||
this.server.addResourceTemplate({});
|
this.server.addResourceTemplate({});
|
||||||
|
|
||||||
// Make the manager accessible (e.g., pass it to tool registration)
|
// Make the manager accessible (e.g., pass it to tool registration)
|
||||||
this.asyncManager = asyncOperationManager;
|
this.asyncManager = asyncOperationManager;
|
||||||
|
|
||||||
// Bind methods
|
// Bind methods
|
||||||
this.init = this.init.bind(this);
|
this.init = this.init.bind(this);
|
||||||
this.start = this.start.bind(this);
|
this.start = this.start.bind(this);
|
||||||
this.stop = this.stop.bind(this);
|
this.stop = this.stop.bind(this);
|
||||||
|
|
||||||
// Setup logging
|
// Setup logging
|
||||||
this.logger = logger;
|
this.logger = logger;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Initialize the MCP server with necessary tools and routes
|
* Initialize the MCP server with necessary tools and routes
|
||||||
*/
|
*/
|
||||||
async init() {
|
async init() {
|
||||||
if (this.initialized) return;
|
if (this.initialized) return;
|
||||||
|
|
||||||
// Pass the manager instance to the tool registration function
|
// Pass the manager instance to the tool registration function
|
||||||
registerTaskMasterTools(this.server, this.asyncManager);
|
registerTaskMasterTools(this.server, this.asyncManager);
|
||||||
|
|
||||||
this.initialized = true;
|
this.initialized = true;
|
||||||
|
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Start the MCP server
|
* Start the MCP server
|
||||||
*/
|
*/
|
||||||
async start() {
|
async start() {
|
||||||
if (!this.initialized) {
|
if (!this.initialized) {
|
||||||
await this.init();
|
await this.init();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start the FastMCP server with increased timeout
|
// Start the FastMCP server with increased timeout
|
||||||
await this.server.start({
|
await this.server.start({
|
||||||
transportType: "stdio",
|
transportType: 'stdio',
|
||||||
timeout: 120000 // 2 minutes timeout (in milliseconds)
|
timeout: 120000 // 2 minutes timeout (in milliseconds)
|
||||||
});
|
});
|
||||||
|
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Stop the MCP server
|
* Stop the MCP server
|
||||||
*/
|
*/
|
||||||
async stop() {
|
async stop() {
|
||||||
if (this.server) {
|
if (this.server) {
|
||||||
await this.server.stop();
|
await this.server.stop();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Export the manager from here as well, if needed elsewhere
|
// Export the manager from here as well, if needed elsewhere
|
||||||
|
|||||||
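For orientation, a minimal sketch of how this class would be driven from an entry script. The entry point itself is not part of this hunk, and the export style of TaskMasterMCPServer is an assumption:

// Hypothetical usage sketch -- not part of this changeset.
import TaskMasterMCPServer from './index.js'; // assumes a default export

const server = new TaskMasterMCPServer();

// start() runs init() on first use, then serves MCP over stdio
server.start().catch((err) => {
	console.error('Failed to start MCP server:', err);
	process.exit(1);
});

// stop() shuts the FastMCP transport down cleanly
process.on('SIGTERM', () => server.stop());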
@@ -1,19 +1,19 @@
import chalk from 'chalk';
import { isSilentMode } from '../../scripts/modules/utils.js';

// Define log levels
const LOG_LEVELS = {
	debug: 0,
	info: 1,
	warn: 2,
	error: 3,
	success: 4
};

// Get log level from environment or default to info
const LOG_LEVEL = process.env.LOG_LEVEL
	? (LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()] ?? LOG_LEVELS.info)
	: LOG_LEVELS.info;

/**
 * Logs a message with the specified level
@@ -21,56 +21,66 @@ const LOG_LEVEL = process.env.LOG_LEVEL
 * @param {...any} args - Arguments to log
 */
function log(level, ...args) {
	// Skip logging if silent mode is enabled
	if (isSilentMode()) {
		return;
	}

	// Use text prefixes instead of emojis
	const prefixes = {
		debug: chalk.gray('[DEBUG]'),
		info: chalk.blue('[INFO]'),
		warn: chalk.yellow('[WARN]'),
		error: chalk.red('[ERROR]'),
		success: chalk.green('[SUCCESS]')
	};

	if (LOG_LEVELS[level] !== undefined && LOG_LEVELS[level] >= LOG_LEVEL) {
		const prefix = prefixes[level] || '';
		let coloredArgs = args;

		try {
			switch (level) {
				case 'error':
					coloredArgs = args.map((arg) =>
						typeof arg === 'string' ? chalk.red(arg) : arg
					);
					break;
				case 'warn':
					coloredArgs = args.map((arg) =>
						typeof arg === 'string' ? chalk.yellow(arg) : arg
					);
					break;
				case 'success':
					coloredArgs = args.map((arg) =>
						typeof arg === 'string' ? chalk.green(arg) : arg
					);
					break;
				case 'info':
					coloredArgs = args.map((arg) =>
						typeof arg === 'string' ? chalk.blue(arg) : arg
					);
					break;
				case 'debug':
					coloredArgs = args.map((arg) =>
						typeof arg === 'string' ? chalk.gray(arg) : arg
					);
					break;
				// default: use original args (no color)
			}
		} catch (colorError) {
			// Fallback if chalk fails on an argument
			// Use console.error here for internal logger errors, separate from normal logging
			console.error('Internal Logger Error applying chalk color:', colorError);
			coloredArgs = args;
		}

		// Revert to console.log - FastMCP's context logger (context.log)
		// is responsible for directing logs correctly (e.g., to stderr)
		// during tool execution without upsetting the client connection.
		// Logs outside of tool execution (like startup) will go to stdout.
		console.log(prefix, ...coloredArgs);
	}
}

/**
@@ -78,16 +88,19 @@ function log(level, ...args) {
 * @returns {Object} Logger object with info, error, debug, warn, and success methods
 */
export function createLogger() {
	const createLogMethod =
		(level) =>
		(...args) =>
			log(level, ...args);

	return {
		debug: createLogMethod('debug'),
		info: createLogMethod('info'),
		warn: createLogMethod('warn'),
		error: createLogMethod('error'),
		success: createLogMethod('success'),
		log: log // Also expose the raw log function
	};
}

// Export a default logger instance
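A short usage sketch of this logger module, assuming the default export hinted at by the final comment (the export line itself falls outside the hunk):

// Hypothetical usage sketch -- run with LOG_LEVEL=debug in the environment;
// the level is read once at module load, so setting it later has no effect.
import logger, { createLogger } from './logger.js';

logger.info('Server starting'); // info (1) >= debug (0), so this prints
logger.debug('Detailed state', { step: 1 }); // prints only when LOG_LEVEL=debug

const toolLogger = createLogger();
toolLogger.error('Something failed'); // string args are wrapped in chalk.red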
@@ -3,63 +3,79 @@
 * Tool for adding a dependency to a task
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { addDependencyDirect } from '../core/task-master-core.js';

/**
 * Register the addDependency tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerAddDependencyTool(server) {
	server.addTool({
		name: 'add_dependency',
		description: 'Add a dependency relationship between two tasks',
		parameters: z.object({
			id: z.string().describe('ID of task that will depend on another task'),
			dependsOn: z
				.string()
				.describe('ID of task that will become a dependency'),
			file: z
				.string()
				.optional()
				.describe('Path to the tasks file (default: tasks/tasks.json)'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(
					`Adding dependency for task ${args.id} to depend on ${args.dependsOn}`
				);
				reportProgress({ progress: 0 });

				// Get project root using the utility function
				let rootFolder = getProjectRootFromSession(session, log);

				// Fallback to args.projectRoot if session didn't provide one
				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				// Call the direct function with the resolved rootFolder
				const result = await addDependencyDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log,
					{ reportProgress, mcpLog: log, session }
				);

				reportProgress({ progress: 100 });

				// Log result
				if (result.success) {
					log.info(`Successfully added dependency: ${result.data.message}`);
				} else {
					log.error(`Failed to add dependency: ${result.error.message}`);
				}

				// Use handleApiResult to format the response
				return handleApiResult(result, log, 'Error adding dependency');
			} catch (error) {
				log.error(`Error in addDependency tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
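Each tool module below follows this same registration shape. A minimal sketch of wiring one tool onto a FastMCP instance, which tools/index.js ordinarily does for the whole set (the option values here are placeholders):

// Hypothetical wiring sketch.
import { FastMCP } from 'fastmcp';
import { registerAddDependencyTool } from './add-dependency.js';

const server = new FastMCP({ name: 'Example', version: '0.0.0' });
registerAddDependencyTool(server);
// An MCP client can now call 'add_dependency' with { id, dependsOn, file?, projectRoot? }.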
@@ -3,61 +3,94 @@
 * Tool for adding subtasks to existing tasks
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { addSubtaskDirect } from '../core/task-master-core.js';

/**
 * Register the addSubtask tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerAddSubtaskTool(server) {
	server.addTool({
		name: 'add_subtask',
		description: 'Add a subtask to an existing task',
		parameters: z.object({
			id: z.string().describe('Parent task ID (required)'),
			taskId: z
				.string()
				.optional()
				.describe('Existing task ID to convert to subtask'),
			title: z
				.string()
				.optional()
				.describe('Title for the new subtask (when creating a new subtask)'),
			description: z
				.string()
				.optional()
				.describe('Description for the new subtask'),
			details: z
				.string()
				.optional()
				.describe('Implementation details for the new subtask'),
			status: z
				.string()
				.optional()
				.describe("Status for the new subtask (default: 'pending')"),
			dependencies: z
				.string()
				.optional()
				.describe('Comma-separated list of dependency IDs for the new subtask'),
			file: z
				.string()
				.optional()
				.describe('Path to the tasks file (default: tasks/tasks.json)'),
			skipGenerate: z
				.boolean()
				.optional()
				.describe('Skip regenerating task files'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Adding subtask with args: ${JSON.stringify(args)}`);

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await addSubtaskDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log,
					{ reportProgress, mcpLog: log, session }
				);

				if (result.success) {
					log.info(`Subtask added successfully: ${result.data.message}`);
				} else {
					log.error(`Failed to add subtask: ${result.error.message}`);
				}

				return handleApiResult(result, log, 'Error adding subtask');
			} catch (error) {
				log.error(`Error in addSubtask tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -3,56 +3,72 @@
 * Tool to add a new task using AI
 */

import { z } from 'zod';
import {
	createErrorResponse,
	createContentResponse,
	getProjectRootFromSession,
	executeTaskMasterCommand,
	handleApiResult
} from './utils.js';
import { addTaskDirect } from '../core/task-master-core.js';

/**
 * Register the addTask tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerAddTaskTool(server) {
	server.addTool({
		name: 'add_task',
		description: 'Add a new task using AI',
		parameters: z.object({
			prompt: z.string().describe('Description of the task to add'),
			dependencies: z
				.string()
				.optional()
				.describe('Comma-separated list of task IDs this task depends on'),
			priority: z
				.string()
				.optional()
				.describe('Task priority (high, medium, low)'),
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.optional()
				.describe('Root directory of the project'),
			research: z
				.boolean()
				.optional()
				.describe('Whether to use research capabilities for task creation')
		}),
		execute: async (args, { log, reportProgress, session }) => {
			try {
				log.info(`Starting add-task with args: ${JSON.stringify(args)}`);

				// Get project root from session
				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				// Call the direct function
				const result = await addTaskDirect(
					{
						...args,
						projectRoot: rootFolder
					},
					log,
					{ reportProgress, session }
				);

				// Return the result
				return handleApiResult(result, log);
			} catch (error) {
				log.error(`Error in add-task tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -3,58 +3,95 @@
 * Tool for analyzing task complexity and generating recommendations
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { analyzeTaskComplexityDirect } from '../core/task-master-core.js';

/**
 * Register the analyze tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerAnalyzeTool(server) {
	server.addTool({
		name: 'analyze_project_complexity',
		description:
			'Analyze task complexity and generate expansion recommendations',
		parameters: z.object({
			output: z
				.string()
				.optional()
				.describe(
					'Output file path for the report (default: scripts/task-complexity-report.json)'
				),
			model: z
				.string()
				.optional()
				.describe(
					'LLM model to use for analysis (defaults to configured model)'
				),
			threshold: z
				.union([z.number(), z.string()])
				.optional()
				.describe(
					'Minimum complexity score to recommend expansion (1-10) (default: 5)'
				),
			file: z
				.string()
				.optional()
				.describe('Path to the tasks file (default: tasks/tasks.json)'),
			research: z
				.boolean()
				.optional()
				.describe('Use Perplexity AI for research-backed complexity analysis'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session }) => {
			try {
				log.info(
					`Analyzing task complexity with args: ${JSON.stringify(args)}`
				);

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await analyzeTaskComplexityDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log,
					{ session }
				);

				if (result.success) {
					log.info(`Task complexity analysis complete: ${result.data.message}`);
					log.info(
						`Report summary: ${JSON.stringify(result.data.reportSummary)}`
					);
				} else {
					log.error(
						`Failed to analyze task complexity: ${result.error.message}`
					);
				}

				return handleApiResult(result, log, 'Error analyzing task complexity');
			} catch (error) {
				log.error(`Error in analyze tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -3,61 +3,78 @@
 * Tool for clearing subtasks from parent tasks
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { clearSubtasksDirect } from '../core/task-master-core.js';

/**
 * Register the clearSubtasks tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerClearSubtasksTool(server) {
	server.addTool({
		name: 'clear_subtasks',
		description: 'Clear subtasks from specified tasks',
		parameters: z
			.object({
				id: z
					.string()
					.optional()
					.describe('Task IDs (comma-separated) to clear subtasks from'),
				all: z.boolean().optional().describe('Clear subtasks from all tasks'),
				file: z
					.string()
					.optional()
					.describe('Path to the tasks file (default: tasks/tasks.json)'),
				projectRoot: z
					.string()
					.optional()
					.describe(
						'Root directory of the project (default: current working directory)'
					)
			})
			.refine((data) => data.id || data.all, {
				message: "Either 'id' or 'all' parameter must be provided",
				path: ['id', 'all']
			}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);
				await reportProgress({ progress: 0 });

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await clearSubtasksDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log,
					{ reportProgress, mcpLog: log, session }
				);

				reportProgress({ progress: 100 });

				if (result.success) {
					log.info(`Subtasks cleared successfully: ${result.data.message}`);
				} else {
					log.error(`Failed to clear subtasks: ${result.error.message}`);
				}

				return handleApiResult(result, log, 'Error clearing subtasks');
			} catch (error) {
				log.error(`Error in clearSubtasks tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -3,56 +3,81 @@
 * Tool for displaying the complexity analysis report
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { complexityReportDirect } from '../core/task-master-core.js';

/**
 * Register the complexityReport tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerComplexityReportTool(server) {
	server.addTool({
		name: 'complexity_report',
		description: 'Display the complexity analysis report in a readable format',
		parameters: z.object({
			file: z
				.string()
				.optional()
				.describe(
					'Path to the report file (default: scripts/task-complexity-report.json)'
				),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(
					`Getting complexity report with args: ${JSON.stringify(args)}`
				);
				// await reportProgress({ progress: 0 });

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await complexityReportDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log /*, { reportProgress, mcpLog: log, session}*/
				);

				// await reportProgress({ progress: 100 });

				if (result.success) {
					log.info(
						`Successfully retrieved complexity report${result.fromCache ? ' (from cache)' : ''}`
					);
				} else {
					log.error(
						`Failed to retrieve complexity report: ${result.error.message}`
					);
				}

				return handleApiResult(
					result,
					log,
					'Error retrieving complexity report'
				);
			} catch (error) {
				log.error(`Error in complexity-report tool: ${error.message}`);
				return createErrorResponse(
					`Failed to retrieve complexity report: ${error.message}`
				);
			}
		}
	});
}
@@ -3,57 +3,87 @@
 * Tool for expanding all pending tasks with subtasks
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { expandAllTasksDirect } from '../core/task-master-core.js';

/**
 * Register the expandAll tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerExpandAllTool(server) {
	server.addTool({
		name: 'expand_all',
		description: 'Expand all pending tasks into subtasks',
		parameters: z.object({
			num: z
				.string()
				.optional()
				.describe('Number of subtasks to generate for each task'),
			research: z
				.boolean()
				.optional()
				.describe(
					'Enable Perplexity AI for research-backed subtask generation'
				),
			prompt: z
				.string()
				.optional()
				.describe('Additional context to guide subtask generation'),
			force: z
				.boolean()
				.optional()
				.describe(
					'Force regeneration of subtasks for tasks that already have them'
				),
			file: z
				.string()
				.optional()
				.describe('Path to the tasks file (default: tasks/tasks.json)'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session }) => {
			try {
				log.info(`Expanding all tasks with args: ${JSON.stringify(args)}`);

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await expandAllTasksDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log,
					{ session }
				);

				if (result.success) {
					log.info(`Successfully expanded all tasks: ${result.data.message}`);
				} else {
					log.error(
						`Failed to expand all tasks: ${result.error?.message || 'Unknown error'}`
					);
				}

				return handleApiResult(result, log, 'Error expanding all tasks');
			} catch (error) {
				log.error(`Error in expand-all tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -3,75 +3,88 @@
 * Tool to expand a task into subtasks
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { expandTaskDirect } from '../core/task-master-core.js';
import fs from 'fs';
import path from 'path';

/**
 * Register the expand-task tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerExpandTaskTool(server) {
	server.addTool({
		name: 'expand_task',
		description: 'Expand a task into subtasks for detailed implementation',
		parameters: z.object({
			id: z.string().describe('ID of task to expand'),
			num: z
				.union([z.string(), z.number()])
				.optional()
				.describe('Number of subtasks to generate'),
			research: z
				.boolean()
				.optional()
				.describe('Use Perplexity AI for research-backed generation'),
			prompt: z
				.string()
				.optional()
				.describe('Additional context for subtask generation'),
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, reportProgress, session }) => {
			try {
				log.info(`Starting expand-task with args: ${JSON.stringify(args)}`);

				// Get project root from session
				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				log.info(`Project root resolved to: ${rootFolder}`);

				// Check for tasks.json in the standard locations
				const tasksJsonPath = path.join(rootFolder, 'tasks', 'tasks.json');

				if (fs.existsSync(tasksJsonPath)) {
					log.info(`Found tasks.json at ${tasksJsonPath}`);
					// Add the file parameter directly to args
					args.file = tasksJsonPath;
				} else {
					log.warn(`Could not find tasks.json at ${tasksJsonPath}`);
				}

				// Call direct function with only session in the context, not reportProgress
				// Use the pattern recommended in the MCP guidelines
				const result = await expandTaskDirect(
					{
						...args,
						projectRoot: rootFolder
					},
					log,
					{ session }
				); // Only pass session, NOT reportProgress

				// Return the result
				return handleApiResult(result, log, 'Error expanding task');
			} catch (error) {
				log.error(`Error in expand task tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -3,56 +3,65 @@
 * Tool for automatically fixing invalid task dependencies
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { fixDependenciesDirect } from '../core/task-master-core.js';

/**
 * Register the fixDependencies tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerFixDependenciesTool(server) {
	server.addTool({
		name: 'fix_dependencies',
		description: 'Fix invalid dependencies in tasks automatically',
		parameters: z.object({
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Fixing dependencies with args: ${JSON.stringify(args)}`);
				await reportProgress({ progress: 0 });

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await fixDependenciesDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log,
					{ reportProgress, mcpLog: log, session }
				);

				await reportProgress({ progress: 100 });

				if (result.success) {
					log.info(`Successfully fixed dependencies: ${result.data.message}`);
				} else {
					log.error(`Failed to fix dependencies: ${result.error.message}`);
				}

				return handleApiResult(result, log, 'Error fixing dependencies');
			} catch (error) {
				log.error(`Error in fixDependencies tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -3,62 +3,71 @@
 * Tool to generate individual task files from tasks.json
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { generateTaskFilesDirect } from '../core/task-master-core.js';

/**
 * Register the generate tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerGenerateTool(server) {
	server.addTool({
		name: 'generate',
		description:
			'Generates individual task files in tasks/ directory based on tasks.json',
		parameters: z.object({
			file: z.string().optional().describe('Path to the tasks file'),
			output: z
				.string()
				.optional()
				.describe('Output directory (default: same directory as tasks file)'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Generating task files with args: ${JSON.stringify(args)}`);
				// await reportProgress({ progress: 0 });

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await generateTaskFilesDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log /*, { reportProgress, mcpLog: log, session}*/
				);

				// await reportProgress({ progress: 100 });

				if (result.success) {
					log.info(`Successfully generated task files: ${result.data.message}`);
				} else {
					log.error(
						`Failed to generate task files: ${result.error?.message || 'Unknown error'}`
					);
				}

				return handleApiResult(result, log, 'Error generating task files');
			} catch (error) {
				log.error(`Error in generate tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -8,35 +8,40 @@ import { createErrorResponse, createContentResponse } from './utils.js'; // Assu
 * @param {AsyncOperationManager} asyncManager - The async operation manager.
 */
export function registerGetOperationStatusTool(server, asyncManager) {
	server.addTool({
		name: 'get_operation_status',
		description:
			'Retrieves the status and result/error of a background operation.',
		parameters: z.object({
			operationId: z.string().describe('The ID of the operation to check.')
		}),
		execute: async (args, { log }) => {
			try {
				const { operationId } = args;
				log.info(`Checking status for operation ID: ${operationId}`);

				const status = asyncManager.getStatus(operationId);

				// Status will now always return an object, but it might have status='not_found'
				if (status.status === 'not_found') {
					log.warn(`Operation ID not found: ${operationId}`);
					return createErrorResponse(
						status.error?.message || `Operation ID not found: ${operationId}`,
						status.error?.code || 'OPERATION_NOT_FOUND'
					);
				}

				log.info(`Status for ${operationId}: ${status.status}`);
				return createContentResponse(status);
			} catch (error) {
				log.error(`Error in get_operation_status tool: ${error.message}`, {
					stack: error.stack
				});
				return createErrorResponse(
					`Failed to get operation status: ${error.message}`,
					'GET_STATUS_ERROR'
				);
			}
		}
	});
}
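Note: the tool above relies on asyncManager.getStatus() always returning an object, even for unknown IDs. A minimal sketch of that assumed contract follows; the field names are inferred from how the tool reads the result and are not taken from this changeset:

// Hypothetical sketch of the status shape the tool expects (not part of this diff).
function getStatusExample(operations, operationId) {
	const op = operations.get(operationId); // operations: a Map keyed by operation ID
	if (!op) {
		return {
			status: 'not_found',
			error: {
				code: 'OPERATION_NOT_FOUND',
				message: `Operation ID not found: ${operationId}`
			}
		};
	}
	// e.g. 'pending', 'running', 'completed' or 'failed', plus result/error when finished
	return { operationId, status: op.status, result: op.result, error: op.error };
}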
@@ -3,13 +3,13 @@
 * Tool to get task details by ID
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { showTaskDirect } from '../core/task-master-core.js';

/**
 * Custom processor function that removes allTasks from the response
@@ -17,16 +17,16 @@ import { showTaskDirect } from "../core/task-master-core.js";
 * @returns {Object} - The processed data with allTasks removed
 */
function processTaskResponse(data) {
	if (!data) return data;

	// If we have the expected structure with task and allTasks
	if (data.task) {
		// Return only the task object, removing the allTasks array
		return data.task;
	}

	// If structure is unexpected, return as is
	return data;
}

/**
@@ -34,59 +34,75 @@ function processTaskResponse(data) {
 * @param {Object} server - FastMCP server instance
 */
export function registerShowTaskTool(server) {
	server.addTool({
		name: 'get_task',
		description: 'Get detailed information about a specific task',
		parameters: z.object({
			id: z.string().describe('Task ID to get'),
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session, reportProgress }) => {
			// Log the session right at the start of execute
			log.info(
				`Session object received in execute: ${JSON.stringify(session)}`
			); // Use JSON.stringify for better visibility

			try {
				log.info(`Getting task details for ID: ${args.id}`);

				log.info(
					`Session object received in execute: ${JSON.stringify(session)}`
				); // Use JSON.stringify for better visibility

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				} else if (!rootFolder) {
					// Ensure we always have *some* root, even if session failed and args didn't provide one
					rootFolder = process.cwd();
					log.warn(
						`Session and args failed to provide root, using CWD: ${rootFolder}`
					);
				}

				log.info(`Attempting to use project root: ${rootFolder}`); // Log the final resolved root

				log.info(`Root folder: ${rootFolder}`); // Log the final resolved root
				const result = await showTaskDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log
				);

				if (result.success) {
					log.info(
						`Successfully retrieved task details for ID: ${args.id}${result.fromCache ? ' (from cache)' : ''}`
					);
				} else {
					log.error(`Failed to get task: ${result.error.message}`);
				}

				// Use our custom processor function to remove allTasks from the response
				return handleApiResult(
					result,
					log,
					'Error retrieving task details',
					processTaskResponse
				);
			} catch (error) {
				log.error(`Error in get-task tool: ${error.message}\n${error.stack}`); // Add stack trace
				return createErrorResponse(`Failed to get task: ${error.message}`);
			}
		}
	});
}
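The session, then args.projectRoot, then process.cwd() fallback seen above recurs in most tools in this changeset. A hypothetical helper that factors it out could look like the following sketch; it is not part of the diff and simply reuses getProjectRootFromSession from ./utils.js:

// Hypothetical refactor sketch (not in this changeset).
function resolveRootFolder(session, args, log) {
	let rootFolder = getProjectRootFromSession(session, log);
	if (!rootFolder && args.projectRoot) {
		rootFolder = args.projectRoot;
		log.info(`Using project root from args as fallback: ${rootFolder}`);
	} else if (!rootFolder) {
		// Last resort: the server process's working directory
		rootFolder = process.cwd();
		log.warn(`Session and args failed to provide root, using CWD: ${rootFolder}`);
	}
	return rootFolder;
}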
@@ -3,63 +3,79 @@
 * Tool to get all tasks from Task Master
 */

import { z } from 'zod';
import {
	createErrorResponse,
	handleApiResult,
	getProjectRootFromSession
} from './utils.js';
import { listTasksDirect } from '../core/task-master-core.js';

/**
 * Register the getTasks tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerListTasksTool(server) {
	server.addTool({
		name: 'get_tasks',
		description:
			'Get all tasks from Task Master, optionally filtering by status and including subtasks.',
		parameters: z.object({
			status: z
				.string()
				.optional()
				.describe("Filter tasks by status (e.g., 'pending', 'done')"),
			withSubtasks: z
				.boolean()
				.optional()
				.describe(
					'Include subtasks nested within their parent tasks in the response'
				),
			file: z
				.string()
				.optional()
				.describe(
					'Path to the tasks file (relative to project root or absolute)'
				),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: automatically detected from session or CWD)'
				)
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Getting tasks with filters: ${JSON.stringify(args)}`);
				// await reportProgress({ progress: 0 });

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await listTasksDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log /*, { reportProgress, mcpLog: log, session}*/
				);

				// await reportProgress({ progress: 100 });

				log.info(
					`Retrieved ${result.success ? result.data?.tasks?.length || 0 : 0} tasks${result.fromCache ? ' (from cache)' : ''}`
				);
				return handleApiResult(result, log, 'Error getting tasks');
			} catch (error) {
				log.error(`Error getting tasks: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}

// We no longer need the formatTasksResponse function as we're returning raw JSON data
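For reference, a get_tasks call from an MCP client might pass arguments shaped like the following sketch; the values are illustrative only and are not taken from this changeset:

// Illustrative argument object for the get_tasks tool.
const exampleGetTasksArgs = {
	status: 'pending',               // optional status filter
	withSubtasks: true,              // nest subtasks under their parent tasks
	file: 'tasks/tasks.json',        // optional explicit tasks file
	projectRoot: '/path/to/project'  // optional; session-based detection is preferred
};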
@@ -3,28 +3,28 @@
 * Export all Task Master CLI tools for MCP server
 */

import { registerListTasksTool } from './get-tasks.js';
import logger from '../logger.js';
import { registerSetTaskStatusTool } from './set-task-status.js';
import { registerParsePRDTool } from './parse-prd.js';
import { registerUpdateTool } from './update.js';
import { registerUpdateTaskTool } from './update-task.js';
import { registerUpdateSubtaskTool } from './update-subtask.js';
import { registerGenerateTool } from './generate.js';
import { registerShowTaskTool } from './get-task.js';
import { registerNextTaskTool } from './next-task.js';
import { registerExpandTaskTool } from './expand-task.js';
import { registerAddTaskTool } from './add-task.js';
import { registerAddSubtaskTool } from './add-subtask.js';
import { registerRemoveSubtaskTool } from './remove-subtask.js';
import { registerAnalyzeTool } from './analyze.js';
import { registerClearSubtasksTool } from './clear-subtasks.js';
import { registerExpandAllTool } from './expand-all.js';
import { registerRemoveDependencyTool } from './remove-dependency.js';
import { registerValidateDependenciesTool } from './validate-dependencies.js';
import { registerFixDependenciesTool } from './fix-dependencies.js';
import { registerComplexityReportTool } from './complexity-report.js';
import { registerAddDependencyTool } from './add-dependency.js';
import { registerRemoveTaskTool } from './remove-task.js';
import { registerInitializeProjectTool } from './initialize-project.js';
import { asyncOperationManager } from '../core/utils/async-manager.js';
@@ -34,40 +34,40 @@ import { asyncOperationManager } from '../core/utils/async-manager.js';
 * @param {Object} server - FastMCP server instance
 * @param {asyncOperationManager} asyncManager - The async operation manager instance
 */
export function registerTaskMasterTools(server, asyncManager) {
	try {
		// Register each tool
		registerListTasksTool(server);
		registerSetTaskStatusTool(server);
		registerParsePRDTool(server);
		registerUpdateTool(server);
		registerUpdateTaskTool(server);
		registerUpdateSubtaskTool(server);
		registerGenerateTool(server);
		registerShowTaskTool(server);
		registerNextTaskTool(server);
		registerExpandTaskTool(server);
		registerAddTaskTool(server, asyncManager);
		registerAddSubtaskTool(server);
		registerRemoveSubtaskTool(server);
		registerAnalyzeTool(server);
		registerClearSubtasksTool(server);
		registerExpandAllTool(server);
		registerRemoveDependencyTool(server);
		registerValidateDependenciesTool(server);
		registerFixDependenciesTool(server);
		registerComplexityReportTool(server);
		registerAddDependencyTool(server);
		registerRemoveTaskTool(server);
		registerInitializeProjectTool(server);
	} catch (error) {
		logger.error(`Error registering Task Master tools: ${error.message}`);
		throw error;
	}

	logger.info('Registered Task Master MCP tools');
}

export default {
	registerTaskMasterTools
};
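A rough wiring sketch follows, assuming a FastMCP-style server object. Only registerTaskMasterTools(server, asyncManager) comes from this file; the constructor options, import paths and start() call are assumptions for illustration:

// Hypothetical server bootstrap (not part of this diff).
import { FastMCP } from 'fastmcp';
import { registerTaskMasterTools } from './tools/index.js';
import { asyncOperationManager } from './core/utils/async-manager.js';

const server = new FastMCP({ name: 'Task Master MCP', version: '1.0.0' });
registerTaskMasterTools(server, asyncOperationManager);
await server.start({ transportType: 'stdio' });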
@@ -1,62 +1,99 @@
import { z } from 'zod';
import { execSync } from 'child_process';
import { createContentResponse, createErrorResponse } from './utils.js'; // Only need response creators

export function registerInitializeProjectTool(server) {
	server.addTool({
		name: 'initialize_project', // snake_case for tool name
		description:
			"Initializes a new Task Master project structure in the current working directory by running 'task-master init'.",
		parameters: z.object({
			projectName: z
				.string()
				.optional()
				.describe('The name for the new project.'),
			projectDescription: z
				.string()
				.optional()
				.describe('A brief description for the project.'),
			projectVersion: z
				.string()
				.optional()
				.describe("The initial version for the project (e.g., '0.1.0')."),
			authorName: z.string().optional().describe("The author's name."),
			skipInstall: z
				.boolean()
				.optional()
				.default(false)
				.describe('Skip installing dependencies automatically.'),
			addAliases: z
				.boolean()
				.optional()
				.default(false)
				.describe('Add shell aliases (tm, taskmaster) to shell config file.'),
			yes: z
				.boolean()
				.optional()
				.default(false)
				.describe('Skip prompts and use default values or provided arguments.')
			// projectRoot is not needed here as 'init' works on the current directory
		}),
		execute: async (args, { log }) => {
			// Destructure context to get log
			try {
				log.info(
					`Executing initialize_project with args: ${JSON.stringify(args)}`
				);

				// Construct the command arguments carefully
				// Using npx ensures it uses the locally installed version if available, or fetches it
				let command = 'npx task-master init';
				const cliArgs = [];
				if (args.projectName)
					cliArgs.push(`--name "${args.projectName.replace(/"/g, '\\"')}"`); // Escape quotes
				if (args.projectDescription)
					cliArgs.push(
						`--description "${args.projectDescription.replace(/"/g, '\\"')}"`
					);
				if (args.projectVersion)
					cliArgs.push(
						`--version "${args.projectVersion.replace(/"/g, '\\"')}"`
					);
				if (args.authorName)
					cliArgs.push(`--author "${args.authorName.replace(/"/g, '\\"')}"`);
				if (args.skipInstall) cliArgs.push('--skip-install');
				if (args.addAliases) cliArgs.push('--aliases');
				if (args.yes) cliArgs.push('--yes');

				command += ' ' + cliArgs.join(' ');

				log.info(`Constructed command: ${command}`);

				// Execute the command in the current working directory of the server process
				// Capture stdout/stderr. Use a reasonable timeout (e.g., 5 minutes)
				const output = execSync(command, {
					encoding: 'utf8',
					stdio: 'pipe',
					timeout: 300000
				});

				log.info(`Initialization output:\n${output}`);

				// Return a standard success response manually
				return createContentResponse(
					'Project initialized successfully.',
					{ output: output } // Include output in the data payload
				);
			} catch (error) {
				// Catch errors from execSync or timeouts
				const errorMessage = `Project initialization failed: ${error.message}`;
				const errorDetails =
					error.stderr?.toString() || error.stdout?.toString() || error.message; // Provide stderr/stdout if available
				log.error(`${errorMessage}\nDetails: ${errorDetails}`);

				// Return a standard error response manually
				return createErrorResponse(errorMessage, { details: errorDetails });
			}
		}
	});
}
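Because the tool above builds a shell string and escapes quotes by hand, an alternative sketch (not part of this diff) is to pass an argv array to Node's execFileSync, which sidesteps shell quoting entirely; the flag names mirror the ones used above:

// Hypothetical alternative using an argv array instead of a shell string.
import { execFileSync } from 'child_process';

function buildInitArgv(args) {
	const argv = ['task-master', 'init'];
	if (args.projectName) argv.push('--name', args.projectName);
	if (args.projectDescription) argv.push('--description', args.projectDescription);
	if (args.yes) argv.push('--yes');
	return argv;
}

const output = execFileSync('npx', buildInitArgv({ projectName: 'demo', yes: true }), {
	encoding: 'utf8',
	stdio: 'pipe',
	timeout: 300000
});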
@@ -3,61 +3,69 @@
 * Tool to find the next task to work on
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { nextTaskDirect } from '../core/task-master-core.js';

/**
 * Register the next-task tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerNextTaskTool(server) {
	server.addTool({
		name: 'next_task',
		description:
			'Find the next task to work on based on dependencies and status',
		parameters: z.object({
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Finding next task with args: ${JSON.stringify(args)}`);
				// await reportProgress({ progress: 0 });

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await nextTaskDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log /*, { reportProgress, mcpLog: log, session}*/
				);

				// await reportProgress({ progress: 100 });

				if (result.success) {
					log.info(
						`Successfully found next task: ${result.data?.task?.id || 'No available tasks'}`
					);
				} else {
					log.error(
						`Failed to find next task: ${result.error?.message || 'Unknown error'}`
					);
				}

				return handleApiResult(result, log, 'Error finding next task');
			} catch (error) {
				log.error(`Error in nextTask tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -3,61 +3,86 @@
 * Tool to parse PRD document and generate tasks
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { parsePRDDirect } from '../core/task-master-core.js';

/**
 * Register the parsePRD tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerParsePRDTool(server) {
	server.addTool({
		name: 'parse_prd',
		description:
			'Parse a Product Requirements Document (PRD) or text file to automatically generate initial tasks.',
		parameters: z.object({
			input: z
				.string()
				.default('tasks/tasks.json')
				.describe(
					'Path to the PRD document file (relative to project root or absolute)'
				),
			numTasks: z
				.string()
				.optional()
				.describe(
					'Approximate number of top-level tasks to generate (default: 10)'
				),
			output: z
				.string()
				.optional()
				.describe(
					'Output path for tasks.json file (relative to project root or absolute, default: tasks/tasks.json)'
				),
			force: z
				.boolean()
				.optional()
				.describe('Allow overwriting an existing tasks.json file.'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: automatically detected from session or CWD)'
				)
		}),
		execute: async (args, { log, session }) => {
			try {
				log.info(`Parsing PRD with args: ${JSON.stringify(args)}`);

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await parsePRDDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log,
					{ session }
				);

				if (result.success) {
					log.info(`Successfully parsed PRD: ${result.data.message}`);
				} else {
					log.error(
						`Failed to parse PRD: ${result.error?.message || 'Unknown error'}`
					);
				}

				return handleApiResult(result, log, 'Error parsing PRD');
			} catch (error) {
				log.error(`Error in parse-prd tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -3,58 +3,71 @@
 * Tool for removing a dependency from a task
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { removeDependencyDirect } from '../core/task-master-core.js';

/**
 * Register the removeDependency tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerRemoveDependencyTool(server) {
	server.addTool({
		name: 'remove_dependency',
		description: 'Remove a dependency from a task',
		parameters: z.object({
			id: z.string().describe('Task ID to remove dependency from'),
			dependsOn: z.string().describe('Task ID to remove as a dependency'),
			file: z
				.string()
				.optional()
				.describe('Path to the tasks file (default: tasks/tasks.json)'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(
					`Removing dependency for task ${args.id} from ${args.dependsOn} with args: ${JSON.stringify(args)}`
				);
				// await reportProgress({ progress: 0 });

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await removeDependencyDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log /*, { reportProgress, mcpLog: log, session}*/
				);

				// await reportProgress({ progress: 100 });

				if (result.success) {
					log.info(`Successfully removed dependency: ${result.data.message}`);
				} else {
					log.error(`Failed to remove dependency: ${result.error.message}`);
				}

				return handleApiResult(result, log, 'Error removing dependency');
			} catch (error) {
				log.error(`Error in removeDependency tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -3,59 +3,82 @@
 * Tool for removing subtasks from parent tasks
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { removeSubtaskDirect } from '../core/task-master-core.js';

/**
 * Register the removeSubtask tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerRemoveSubtaskTool(server) {
	server.addTool({
		name: 'remove_subtask',
		description: 'Remove a subtask from its parent task',
		parameters: z.object({
			id: z
				.string()
				.describe(
					"Subtask ID to remove in format 'parentId.subtaskId' (required)"
				),
			convert: z
				.boolean()
				.optional()
				.describe(
					'Convert the subtask to a standalone task instead of deleting it'
				),
			file: z
				.string()
				.optional()
				.describe('Path to the tasks file (default: tasks/tasks.json)'),
			skipGenerate: z
				.boolean()
				.optional()
				.describe('Skip regenerating task files'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Removing subtask with args: ${JSON.stringify(args)}`);
				// await reportProgress({ progress: 0 });

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await removeSubtaskDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log /*, { reportProgress, mcpLog: log, session}*/
				);

				// await reportProgress({ progress: 100 });

				if (result.success) {
					log.info(`Subtask removed successfully: ${result.data.message}`);
				} else {
					log.error(`Failed to remove subtask: ${result.error.message}`);
				}

				return handleApiResult(result, log, 'Error removing subtask');
			} catch (error) {
				log.error(`Error in removeSubtask tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
|||||||
@@ -3,69 +3,79 @@
|
|||||||
* Tool to remove a task by ID
|
* Tool to remove a task by ID
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { z } from "zod";
|
import { z } from 'zod';
|
||||||
import {
|
import {
|
||||||
handleApiResult,
|
handleApiResult,
|
||||||
createErrorResponse,
|
createErrorResponse,
|
||||||
getProjectRootFromSession
|
getProjectRootFromSession
|
||||||
} from "./utils.js";
|
} from './utils.js';
|
||||||
import { removeTaskDirect } from "../core/task-master-core.js";
|
import { removeTaskDirect } from '../core/task-master-core.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Register the remove-task tool with the MCP server
|
* Register the remove-task tool with the MCP server
|
||||||
* @param {Object} server - FastMCP server instance
|
* @param {Object} server - FastMCP server instance
|
||||||
*/
|
*/
|
||||||
export function registerRemoveTaskTool(server) {
|
export function registerRemoveTaskTool(server) {
|
||||||
server.addTool({
|
server.addTool({
|
||||||
name: "remove_task",
|
name: 'remove_task',
|
||||||
description: "Remove a task or subtask permanently from the tasks list",
|
description: 'Remove a task or subtask permanently from the tasks list',
|
||||||
parameters: z.object({
|
parameters: z.object({
|
||||||
id: z.string().describe("ID of the task or subtask to remove (e.g., '5' or '5.2')"),
|
id: z
|
||||||
file: z.string().optional().describe("Path to the tasks file"),
|
.string()
|
||||||
projectRoot: z
|
.describe("ID of the task or subtask to remove (e.g., '5' or '5.2')"),
|
||||||
.string()
|
file: z.string().optional().describe('Path to the tasks file'),
|
||||||
.optional()
|
projectRoot: z
|
||||||
.describe(
|
.string()
|
||||||
"Root directory of the project (default: current working directory)"
|
.optional()
|
||||||
),
|
.describe(
|
||||||
confirm: z.boolean().optional().describe("Whether to skip confirmation prompt (default: false)")
|
'Root directory of the project (default: current working directory)'
|
||||||
}),
|
),
|
||||||
execute: async (args, { log, session }) => {
|
confirm: z
|
||||||
try {
|
.boolean()
|
||||||
log.info(`Removing task with ID: ${args.id}`);
|
.optional()
|
||||||
|
.describe('Whether to skip confirmation prompt (default: false)')
|
||||||
// Get project root from session
|
}),
|
||||||
let rootFolder = getProjectRootFromSession(session, log);
|
execute: async (args, { log, session }) => {
|
||||||
|
try {
|
||||||
if (!rootFolder && args.projectRoot) {
|
log.info(`Removing task with ID: ${args.id}`);
|
||||||
rootFolder = args.projectRoot;
|
|
||||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
// Get project root from session
|
||||||
} else if (!rootFolder) {
|
let rootFolder = getProjectRootFromSession(session, log);
|
||||||
// Ensure we have a default if nothing else works
|
|
||||||
rootFolder = process.cwd();
|
if (!rootFolder && args.projectRoot) {
|
||||||
log.warn(`Session and args failed to provide root, using CWD: ${rootFolder}`);
|
rootFolder = args.projectRoot;
|
||||||
}
|
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||||
|
} else if (!rootFolder) {
|
||||||
log.info(`Using project root: ${rootFolder}`);
|
// Ensure we have a default if nothing else works
|
||||||
|
rootFolder = process.cwd();
|
||||||
// Assume client has already handled confirmation if needed
|
log.warn(
|
||||||
const result = await removeTaskDirect({
|
`Session and args failed to provide root, using CWD: ${rootFolder}`
|
||||||
id: args.id,
|
);
|
||||||
file: args.file,
|
}
|
||||||
projectRoot: rootFolder
|
|
||||||
}, log);
|
log.info(`Using project root: ${rootFolder}`);
|
||||||
|
|
||||||
if (result.success) {
|
// Assume client has already handled confirmation if needed
|
||||||
log.info(`Successfully removed task: ${args.id}`);
|
const result = await removeTaskDirect(
|
||||||
} else {
|
{
|
||||||
log.error(`Failed to remove task: ${result.error.message}`);
|
id: args.id,
|
||||||
}
|
file: args.file,
|
||||||
|
projectRoot: rootFolder
|
||||||
return handleApiResult(result, log, 'Error removing task');
|
},
|
||||||
} catch (error) {
|
log
|
||||||
log.error(`Error in remove-task tool: ${error.message}`);
|
);
|
||||||
return createErrorResponse(`Failed to remove task: ${error.message}`);
|
|
||||||
}
|
if (result.success) {
|
||||||
},
|
log.info(`Successfully removed task: ${args.id}`);
|
||||||
});
|
} else {
|
||||||
}
|
log.error(`Failed to remove task: ${result.error.message}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return handleApiResult(result, log, 'Error removing task');
|
||||||
|
} catch (error) {
|
||||||
|
log.error(`Error in remove-task tool: ${error.message}`);
|
||||||
|
return createErrorResponse(`Failed to remove task: ${error.message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,68 +3,81 @@
|
|||||||
* Tool to set the status of a task
|
* Tool to set the status of a task
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { z } from "zod";
|
import { z } from 'zod';
|
||||||
import {
|
import {
|
||||||
handleApiResult,
|
handleApiResult,
|
||||||
createErrorResponse,
|
createErrorResponse,
|
||||||
getProjectRootFromSession
|
getProjectRootFromSession
|
||||||
} from "./utils.js";
|
} from './utils.js';
|
||||||
import { setTaskStatusDirect } from "../core/task-master-core.js";
|
import { setTaskStatusDirect } from '../core/task-master-core.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Register the setTaskStatus tool with the MCP server
|
* Register the setTaskStatus tool with the MCP server
|
||||||
* @param {Object} server - FastMCP server instance
|
* @param {Object} server - FastMCP server instance
|
||||||
*/
|
*/
|
||||||
export function registerSetTaskStatusTool(server) {
|
export function registerSetTaskStatusTool(server) {
|
||||||
server.addTool({
|
server.addTool({
|
||||||
name: "set_task_status",
|
name: 'set_task_status',
|
||||||
description: "Set the status of one or more tasks or subtasks.",
|
description: 'Set the status of one or more tasks or subtasks.',
|
||||||
parameters: z.object({
|
parameters: z.object({
|
||||||
id: z
|
id: z
|
||||||
.string()
|
.string()
|
||||||
.describe("Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated for multiple updates."),
|
.describe(
|
||||||
status: z
|
"Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated for multiple updates."
|
||||||
.string()
|
),
|
||||||
.describe("New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'."),
|
status: z
|
||||||
file: z.string().optional().describe("Path to the tasks file"),
|
.string()
|
||||||
projectRoot: z
|
.describe(
|
||||||
.string()
|
"New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'."
|
||||||
.optional()
|
),
|
||||||
.describe(
|
file: z.string().optional().describe('Path to the tasks file'),
|
||||||
"Root directory of the project (default: automatically detected)"
|
projectRoot: z
|
||||||
),
|
.string()
|
||||||
}),
|
.optional()
|
||||||
execute: async (args, { log, session }) => {
|
.describe(
|
||||||
try {
|
'Root directory of the project (default: automatically detected)'
|
||||||
log.info(`Setting status of task(s) ${args.id} to: ${args.status}`);
|
)
|
||||||
|
}),
|
||||||
// Get project root from session
|
execute: async (args, { log, session }) => {
|
||||||
let rootFolder = getProjectRootFromSession(session, log);
|
try {
|
||||||
|
log.info(`Setting status of task(s) ${args.id} to: ${args.status}`);
|
||||||
if (!rootFolder && args.projectRoot) {
|
|
||||||
rootFolder = args.projectRoot;
|
// Get project root from session
|
||||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
let rootFolder = getProjectRootFromSession(session, log);
|
||||||
}
|
|
||||||
|
if (!rootFolder && args.projectRoot) {
|
||||||
// Call the direct function with the project root
|
rootFolder = args.projectRoot;
|
||||||
const result = await setTaskStatusDirect({
|
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||||
...args,
|
}
|
||||||
projectRoot: rootFolder
|
|
||||||
}, log);
|
// Call the direct function with the project root
|
||||||
|
const result = await setTaskStatusDirect(
|
||||||
// Log the result
|
{
|
||||||
if (result.success) {
|
...args,
|
||||||
log.info(`Successfully updated status for task(s) ${args.id} to "${args.status}": ${result.data.message}`);
|
projectRoot: rootFolder
|
||||||
} else {
|
},
|
||||||
log.error(`Failed to update task status: ${result.error?.message || 'Unknown error'}`);
|
log
|
||||||
}
|
);
|
||||||
|
|
||||||
// Format and return the result
|
// Log the result
|
||||||
return handleApiResult(result, log, 'Error setting task status');
|
if (result.success) {
|
||||||
} catch (error) {
|
log.info(
|
||||||
log.error(`Error in setTaskStatus tool: ${error.message}`);
|
`Successfully updated status for task(s) ${args.id} to "${args.status}": ${result.data.message}`
|
||||||
return createErrorResponse(`Error setting task status: ${error.message}`);
|
);
|
||||||
}
|
} else {
|
||||||
},
|
log.error(
|
||||||
});
|
`Failed to update task status: ${result.error?.message || 'Unknown error'}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Format and return the result
|
||||||
|
return handleApiResult(result, log, 'Error setting task status');
|
||||||
|
} catch (error) {
|
||||||
|
log.error(`Error in setTaskStatus tool: ${error.message}`);
|
||||||
|
return createErrorResponse(
|
||||||
|
`Error setting task status: ${error.message}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
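For reference, a set_task_status call might carry arguments like the following sketch; the IDs, status and path are illustrative only:

// Illustrative argument object for the set_task_status tool.
const exampleSetStatusArgs = {
	id: '15,15.2',                   // comma-separated task and subtask IDs
	status: 'done',                  // one of the statuses listed in the parameter description
	projectRoot: '/path/to/project'  // optional; session-based detection is preferred
};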
|||||||
@@ -3,61 +3,75 @@
|
|||||||
* Tool to append additional information to a specific subtask
|
* Tool to append additional information to a specific subtask
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { z } from "zod";
|
import { z } from 'zod';
|
||||||
import {
|
import {
|
||||||
handleApiResult,
|
handleApiResult,
|
||||||
createErrorResponse,
|
createErrorResponse,
|
||||||
getProjectRootFromSession
|
getProjectRootFromSession
|
||||||
} from "./utils.js";
|
} from './utils.js';
|
||||||
import { updateSubtaskByIdDirect } from "../core/task-master-core.js";
|
import { updateSubtaskByIdDirect } from '../core/task-master-core.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Register the update-subtask tool with the MCP server
|
* Register the update-subtask tool with the MCP server
|
||||||
* @param {Object} server - FastMCP server instance
|
* @param {Object} server - FastMCP server instance
|
||||||
*/
|
*/
|
||||||
export function registerUpdateSubtaskTool(server) {
|
export function registerUpdateSubtaskTool(server) {
|
||||||
server.addTool({
|
server.addTool({
|
||||||
name: "update_subtask",
|
name: 'update_subtask',
|
||||||
description: "Appends additional information to a specific subtask without replacing existing content",
|
description:
|
||||||
parameters: z.object({
|
'Appends additional information to a specific subtask without replacing existing content',
|
||||||
id: z.string().describe("ID of the subtask to update in format \"parentId.subtaskId\" (e.g., \"5.2\")"),
|
parameters: z.object({
|
||||||
prompt: z.string().describe("Information to add to the subtask"),
|
id: z
|
||||||
research: z.boolean().optional().describe("Use Perplexity AI for research-backed updates"),
|
.string()
|
||||||
file: z.string().optional().describe("Path to the tasks file"),
|
.describe(
|
||||||
projectRoot: z
|
'ID of the subtask to update in format "parentId.subtaskId" (e.g., "5.2")'
|
||||||
.string()
|
),
|
||||||
.optional()
|
prompt: z.string().describe('Information to add to the subtask'),
|
||||||
.describe(
|
research: z
|
||||||
"Root directory of the project (default: current working directory)"
|
.boolean()
|
||||||
),
|
.optional()
|
||||||
}),
|
.describe('Use Perplexity AI for research-backed updates'),
|
||||||
execute: async (args, { log, session }) => {
|
file: z.string().optional().describe('Path to the tasks file'),
|
||||||
try {
|
projectRoot: z
|
||||||
log.info(`Updating subtask with args: ${JSON.stringify(args)}`);
|
.string()
|
||||||
|
.optional()
|
||||||
let rootFolder = getProjectRootFromSession(session, log);
|
.describe(
|
||||||
|
'Root directory of the project (default: current working directory)'
|
||||||
if (!rootFolder && args.projectRoot) {
|
)
|
||||||
rootFolder = args.projectRoot;
|
}),
|
||||||
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
execute: async (args, { log, session }) => {
|
||||||
}
|
try {
|
||||||
|
log.info(`Updating subtask with args: ${JSON.stringify(args)}`);
|
||||||
const result = await updateSubtaskByIdDirect({
|
|
||||||
projectRoot: rootFolder,
|
let rootFolder = getProjectRootFromSession(session, log);
|
||||||
...args
|
|
||||||
}, log, { session });
|
if (!rootFolder && args.projectRoot) {
|
||||||
|
rootFolder = args.projectRoot;
|
||||||
if (result.success) {
|
log.info(`Using project root from args as fallback: ${rootFolder}`);
|
||||||
log.info(`Successfully updated subtask with ID ${args.id}`);
|
}
|
||||||
} else {
|
|
||||||
log.error(`Failed to update subtask: ${result.error?.message || 'Unknown error'}`);
|
const result = await updateSubtaskByIdDirect(
|
||||||
}
|
{
|
||||||
|
projectRoot: rootFolder,
|
||||||
return handleApiResult(result, log, 'Error updating subtask');
|
...args
|
||||||
} catch (error) {
|
},
|
||||||
log.error(`Error in update_subtask tool: ${error.message}`);
|
log,
|
||||||
return createErrorResponse(error.message);
|
{ session }
|
||||||
}
|
);
|
||||||
},
|
|
||||||
});
|
if (result.success) {
|
||||||
}
|
log.info(`Successfully updated subtask with ID ${args.id}`);
|
||||||
|
} else {
|
||||||
|
log.error(
|
||||||
|
`Failed to update subtask: ${result.error?.message || 'Unknown error'}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return handleApiResult(result, log, 'Error updating subtask');
|
||||||
|
} catch (error) {
|
||||||
|
log.error(`Error in update_subtask tool: ${error.message}`);
|
||||||
|
return createErrorResponse(error.message);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,61 +3,75 @@
 * Tool to update a single task by ID with new information
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { updateTaskByIdDirect } from '../core/task-master-core.js';

/**
 * Register the update-task tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerUpdateTaskTool(server) {
	server.addTool({
		name: 'update_task',
		description:
			'Updates a single task by ID with new information or context provided in the prompt.',
		parameters: z.object({
			id: z
				.string()
				.describe("ID of the task or subtask (e.g., '15', '15.2') to update"),
			prompt: z
				.string()
				.describe('New information or context to incorporate into the task'),
			research: z
				.boolean()
				.optional()
				.describe('Use Perplexity AI for research-backed updates'),
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session }) => {
			try {
				log.info(`Updating task with args: ${JSON.stringify(args)}`);

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await updateTaskByIdDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log,
					{ session }
				);

				if (result.success) {
					log.info(`Successfully updated task with ID ${args.id}`);
				} else {
					log.error(
						`Failed to update task: ${result.error?.message || 'Unknown error'}`
					);
				}

				return handleApiResult(result, log, 'Error updating task');
			} catch (error) {
				log.error(`Error in update_task tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -3,61 +3,79 @@
 * Tool to update tasks based on new context/prompt
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { updateTasksDirect } from '../core/task-master-core.js';

/**
 * Register the update tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerUpdateTool(server) {
	server.addTool({
		name: 'update',
		description:
			"Update multiple upcoming tasks (with ID >= 'from' ID) based on new context or changes provided in the prompt. Use 'update_task' instead for a single specific task.",
		parameters: z.object({
			from: z
				.string()
				.describe(
					"Task ID from which to start updating (inclusive). IMPORTANT: This tool uses 'from', not 'id'"
				),
			prompt: z
				.string()
				.describe('Explanation of changes or new context to apply'),
			research: z
				.boolean()
				.optional()
				.describe('Use Perplexity AI for research-backed updates'),
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session }) => {
			try {
				log.info(`Updating tasks with args: ${JSON.stringify(args)}`);

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await updateTasksDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log,
					{ session }
				);

				if (result.success) {
					log.info(
						`Successfully updated tasks from ID ${args.from}: ${result.data.message}`
					);
				} else {
					log.error(
						`Failed to update tasks: ${result.error?.message || 'Unknown error'}`
					);
				}

				return handleApiResult(result, log, 'Error updating tasks');
			} catch (error) {
				log.error(`Error in update tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
@@ -3,68 +3,83 @@
 * Utility functions for Task Master CLI integration
 */

import { spawnSync } from 'child_process';
import path from 'path';
import fs from 'fs';
import { contextManager } from '../core/context-manager.js'; // Import the singleton

// Import path utilities to ensure consistent path resolution
import {
	lastFoundProjectRoot,
	PROJECT_MARKERS
} from '../core/utils/path-utils.js';

/**
 * Get normalized project root path
 * @param {string|undefined} projectRootRaw - Raw project root from arguments
 * @param {Object} log - Logger object
 * @returns {string} - Normalized absolute path to project root
 */
function getProjectRoot(projectRootRaw, log) {
	// PRECEDENCE ORDER:
	// 1. Environment variable override
	// 2. Explicitly provided projectRoot in args
	// 3. Previously found/cached project root
	// 4. Current directory if it has project markers
	// 5. Current directory with warning

	// 1. Check for environment variable override
	if (process.env.TASK_MASTER_PROJECT_ROOT) {
		const envRoot = process.env.TASK_MASTER_PROJECT_ROOT;
		const absolutePath = path.isAbsolute(envRoot)
			? envRoot
			: path.resolve(process.cwd(), envRoot);
		log.info(
			`Using project root from TASK_MASTER_PROJECT_ROOT environment variable: ${absolutePath}`
		);
		return absolutePath;
	}

	// 2. If project root is explicitly provided, use it
	if (projectRootRaw) {
		const absolutePath = path.isAbsolute(projectRootRaw)
			? projectRootRaw
			: path.resolve(process.cwd(), projectRootRaw);

		log.info(`Using explicitly provided project root: ${absolutePath}`);
		return absolutePath;
	}

	// 3. If we have a last found project root from a tasks.json search, use that for consistency
	if (lastFoundProjectRoot) {
		log.info(
			`Using last known project root where tasks.json was found: ${lastFoundProjectRoot}`
		);
		return lastFoundProjectRoot;
	}

	// 4. Check if the current directory has any indicators of being a task-master project
	const currentDir = process.cwd();
	if (
		PROJECT_MARKERS.some((marker) => {
			const markerPath = path.join(currentDir, marker);
			return fs.existsSync(markerPath);
		})
	) {
		log.info(
			`Using current directory as project root (found project markers): ${currentDir}`
		);
		return currentDir;
	}

	// 5. Default to current working directory but warn the user
	log.warn(
		`No task-master project detected in current directory. Using ${currentDir} as project root.`
	);
	log.warn(
		'Consider using --project-root to specify the correct project location or set TASK_MASTER_PROJECT_ROOT environment variable.'
	);
	return currentDir;
}
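A minimal usage sketch of the precedence order above, assuming the module is imported from `./utils.js` and using a bare console-backed logger in place of the FastMCP `log` object (both are illustrative assumptions):

```js
// Illustrative only: a console-backed logger standing in for the FastMCP log object.
import { getProjectRoot } from './utils.js';

const log = {
	info: (msg) => console.log(msg),
	warn: (msg) => console.warn(msg),
	error: (msg) => console.error(msg)
};

// 1. The environment variable wins over everything else.
process.env.TASK_MASTER_PROJECT_ROOT = '/home/user/projects/demo';
getProjectRoot('/some/other/root', log); // -> '/home/user/projects/demo'

// 2. Without the env var, an explicit (possibly relative) root is resolved against cwd.
delete process.env.TASK_MASTER_PROJECT_ROOT;
getProjectRoot('../demo', log); // -> path.resolve(process.cwd(), '../demo')

// 3-5. With neither, the cached root, project markers in cwd, or cwd itself are used.
getProjectRoot(undefined, log);
```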
@@ -74,81 +89,87 @@ function getProjectRoot(projectRootRaw, log) {
 * @returns {string|null} - The absolute path to the project root, or null if not found.
 */
function getProjectRootFromSession(session, log) {
	try {
		// Add detailed logging of session structure
		log.info(
			`Session object: ${JSON.stringify({
				hasSession: !!session,
				hasRoots: !!session?.roots,
				rootsType: typeof session?.roots,
				isRootsArray: Array.isArray(session?.roots),
				rootsLength: session?.roots?.length,
				firstRoot: session?.roots?.[0],
				hasRootsRoots: !!session?.roots?.roots,
				rootsRootsType: typeof session?.roots?.roots,
				isRootsRootsArray: Array.isArray(session?.roots?.roots),
				rootsRootsLength: session?.roots?.roots?.length,
				firstRootsRoot: session?.roots?.roots?.[0]
			})}`
		);

		// ALWAYS ensure we return a valid path for project root
		const cwd = process.cwd();

		// If we have a session with roots array
		if (session?.roots?.[0]?.uri) {
			const rootUri = session.roots[0].uri;
			log.info(`Found rootUri in session.roots[0].uri: ${rootUri}`);
			const rootPath = rootUri.startsWith('file://')
				? decodeURIComponent(rootUri.slice(7))
				: rootUri;
			log.info(`Decoded rootPath: ${rootPath}`);
			return rootPath;
		}

		// If we have a session with roots.roots array (different structure)
		if (session?.roots?.roots?.[0]?.uri) {
			const rootUri = session.roots.roots[0].uri;
			log.info(`Found rootUri in session.roots.roots[0].uri: ${rootUri}`);
			const rootPath = rootUri.startsWith('file://')
				? decodeURIComponent(rootUri.slice(7))
				: rootUri;
			log.info(`Decoded rootPath: ${rootPath}`);
			return rootPath;
		}

		// Get the server's location and try to find project root -- this is a fallback necessary in Cursor IDE
		const serverPath = process.argv[1]; // This should be the path to server.js, which is in mcp-server/
		if (serverPath && serverPath.includes('mcp-server')) {
			// Find the mcp-server directory first
			const mcpServerIndex = serverPath.indexOf('mcp-server');
			if (mcpServerIndex !== -1) {
				// Get the path up to mcp-server, which should be the project root
				const projectRoot = serverPath.substring(0, mcpServerIndex - 1); // -1 to remove trailing slash

				// Verify this looks like our project root by checking for key files/directories
				if (
					fs.existsSync(path.join(projectRoot, '.cursor')) ||
					fs.existsSync(path.join(projectRoot, 'mcp-server')) ||
					fs.existsSync(path.join(projectRoot, 'package.json'))
				) {
					log.info(`Found project root from server path: ${projectRoot}`);
					return projectRoot;
				}
			}
		}

		// ALWAYS ensure we return a valid path as a last resort
		log.info(`Using current working directory as ultimate fallback: ${cwd}`);
		return cwd;
	} catch (e) {
		// If we have a server path, use it as a basis for project root
		const serverPath = process.argv[1];
		if (serverPath && serverPath.includes('mcp-server')) {
			const mcpServerIndex = serverPath.indexOf('mcp-server');
			return mcpServerIndex !== -1
				? serverPath.substring(0, mcpServerIndex - 1)
				: process.cwd();
		}

		// Only use cwd if it's not "/"
		const cwd = process.cwd();
		return cwd !== '/' ? cwd : '/';
	}
}
@@ -159,28 +180,35 @@ function getProjectRootFromSession(session, log) {
 * @param {Function} processFunction - Optional function to process successful result data
 * @returns {Object} - Standardized MCP response object
 */
function handleApiResult(
	result,
	log,
	errorPrefix = 'API error',
	processFunction = processMCPResponseData
) {
	if (!result.success) {
		const errorMsg = result.error?.message || `Unknown ${errorPrefix}`;
		// Include cache status in error logs
		log.error(`${errorPrefix}: ${errorMsg}. From cache: ${result.fromCache}`); // Keep logging cache status on error
		return createErrorResponse(errorMsg);
	}

	// Process the result data if needed
	const processedData = processFunction
		? processFunction(result.data)
		: result.data;

	// Log success including cache status
	log.info(`Successfully completed operation. From cache: ${result.fromCache}`); // Add success log with cache status

	// Create the response payload including the fromCache flag
	const responsePayload = {
		fromCache: result.fromCache, // Get the flag from the original 'result'
		data: processedData // Nest the processed data under a 'data' key
	};

	// Pass this combined payload to createContentResponse
	return createContentResponse(responsePayload);
}
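A small sketch of the payload `handleApiResult` produces for a successful result; the `result` object, its task data, and the logger below are invented for illustration:

```js
// Illustrative only: the task data is fabricated.
import { handleApiResult } from './utils.js';

const log = { info: console.log, warn: console.warn, error: console.error };

const result = {
	success: true,
	fromCache: false,
	data: {
		tasks: [{ id: 1, title: 'Set up project', status: 'pending', details: 'long notes' }]
	}
};

// processMCPResponseData (the default processFunction) strips 'details'/'testStrategy',
// the payload is wrapped as { fromCache, data }, and createContentResponse serializes it:
// { content: [{ type: 'text', text: JSON.stringify({ fromCache: false, data: {...} }, null, 2) }] }
const response = handleApiResult(result, log, 'Error listing tasks');
```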
@@ -193,75 +221,75 @@ function handleApiResult(result, log, errorPrefix = 'API error', processFunction
 * @returns {Object} - The result of the command execution
 */
function executeTaskMasterCommand(
	command,
	log,
	args = [],
	projectRootRaw = null,
	customEnv = null // Changed from session to customEnv
) {
	try {
		// Normalize project root internally using the getProjectRoot utility
		const cwd = getProjectRoot(projectRootRaw, log);

		log.info(
			`Executing task-master ${command} with args: ${JSON.stringify(
				args
			)} in directory: ${cwd}`
		);

		// Prepare full arguments array
		const fullArgs = [command, ...args];

		// Common options for spawn
		const spawnOptions = {
			encoding: 'utf8',
			cwd: cwd,
			// Merge process.env with customEnv, giving precedence to customEnv
			env: { ...process.env, ...(customEnv || {}) }
		};

		// Log the environment being passed (optional, for debugging)
		// log.info(`Spawn options env: ${JSON.stringify(spawnOptions.env)}`);

		// Execute the command using the global task-master CLI or local script
		// Try the global CLI first
		let result = spawnSync('task-master', fullArgs, spawnOptions);

		// If global CLI is not available, try fallback to the local script
		if (result.error && result.error.code === 'ENOENT') {
			log.info('Global task-master not found, falling back to local script');
			// Pass the same spawnOptions (including env) to the fallback
			result = spawnSync('node', ['scripts/dev.js', ...fullArgs], spawnOptions);
		}

		if (result.error) {
			throw new Error(`Command execution error: ${result.error.message}`);
		}

		if (result.status !== 0) {
			// Improve error handling by combining stderr and stdout if stderr is empty
			const errorOutput = result.stderr
				? result.stderr.trim()
				: result.stdout
					? result.stdout.trim()
					: 'Unknown error';
			throw new Error(
				`Command failed with exit code ${result.status}: ${errorOutput}`
			);
		}

		return {
			success: true,
			stdout: result.stdout,
			stderr: result.stderr
		};
	} catch (error) {
		log.error(`Error executing task-master command: ${error.message}`);
		return {
			success: false,
			error: error.message
		};
	}
}
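A hedged usage sketch of `executeTaskMasterCommand`; the subcommand, extra CLI flag, paths, and env values below are placeholders for illustration, not values confirmed by this repository:

```js
// Illustrative only: run a CLI subcommand from the MCP layer with an env override.
import { executeTaskMasterCommand } from './utils.js';

const log = { info: console.log, warn: console.warn, error: console.error };

const outcome = executeTaskMasterCommand(
	'list', // CLI subcommand
	log,
	['--with-subtasks'], // hypothetical extra CLI args
	'/path/to/project', // projectRootRaw, normalized via getProjectRoot
	{ ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY } // customEnv wins over process.env
);

if (outcome.success) {
	console.log(outcome.stdout);
} else {
	console.error(outcome.error);
}
```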
@@ -277,40 +305,44 @@ function executeTaskMasterCommand(
 * Format: { success: boolean, data?: any, error?: { code: string, message: string }, fromCache: boolean }
 */
async function getCachedOrExecute({ cacheKey, actionFn, log }) {
	// Check cache first
	const cachedResult = contextManager.getCachedData(cacheKey);

	if (cachedResult !== undefined) {
		log.info(`Cache hit for key: ${cacheKey}`);
		// Return the cached data in the same structure as a fresh result
		return {
			...cachedResult, // Spread the cached result to maintain its structure
			fromCache: true // Just add the fromCache flag
		};
	}

	log.info(`Cache miss for key: ${cacheKey}. Executing action function.`);

	// Execute the action function if cache missed
	const result = await actionFn();

	// If the action was successful, cache the result (but without fromCache flag)
	if (result.success && result.data !== undefined) {
		log.info(`Action successful. Caching result for key: ${cacheKey}`);
		// Cache the entire result structure (minus the fromCache flag)
		const { fromCache, ...resultToCache } = result;
		contextManager.setCachedData(cacheKey, resultToCache);
	} else if (!result.success) {
		log.warn(
			`Action failed for cache key ${cacheKey}. Result not cached. Error: ${result.error?.message}`
		);
	} else {
		log.warn(
			`Action for cache key ${cacheKey} succeeded but returned no data. Result not cached.`
		);
	}

	// Return the fresh result, indicating it wasn't from cache
	return {
		...result,
		fromCache: false
	};
}
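A usage sketch of `getCachedOrExecute`; the cache key format and the fake `actionFn` payload are assumptions made for illustration:

```js
// Illustrative only: wrap an expensive lookup so repeat calls hit the context-manager cache.
import { getCachedOrExecute } from './utils.js';

const log = { info: console.log, warn: console.warn, error: console.error };

const result = await getCachedOrExecute({
	cacheKey: 'listTasks:/path/to/project:all', // made-up key format
	actionFn: async () => {
		// Must resolve to { success, data?, error? }; this one fakes a payload.
		return { success: true, data: { tasks: [] } };
	},
	log
});

console.log(result.fromCache); // false on the first call, true once the key is cached
```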
@@ -320,56 +352,68 @@ async function getCachedOrExecute({ cacheKey, actionFn, log }) {
 * @param {string[]} fieldsToRemove - An array of field names to remove.
 * @returns {Object|Array} - The processed data with specified fields removed.
 */
function processMCPResponseData(
	taskOrData,
	fieldsToRemove = ['details', 'testStrategy']
) {
	if (!taskOrData) {
		return taskOrData;
	}

	// Helper function to process a single task object
	const processSingleTask = (task) => {
		if (typeof task !== 'object' || task === null) {
			return task;
		}

		const processedTask = { ...task };

		// Remove specified fields from the task
		fieldsToRemove.forEach((field) => {
			delete processedTask[field];
		});

		// Recursively process subtasks if they exist and are an array
		if (processedTask.subtasks && Array.isArray(processedTask.subtasks)) {
			// Use processArrayOfTasks to handle the subtasks array
			processedTask.subtasks = processArrayOfTasks(processedTask.subtasks);
		}

		return processedTask;
	};

	// Helper function to process an array of tasks
	const processArrayOfTasks = (tasks) => {
		return tasks.map(processSingleTask);
	};

	// Check if the input is a data structure containing a 'tasks' array (like from listTasks)
	if (
		typeof taskOrData === 'object' &&
		taskOrData !== null &&
		Array.isArray(taskOrData.tasks)
	) {
		return {
			...taskOrData, // Keep other potential fields like 'stats', 'filter'
			tasks: processArrayOfTasks(taskOrData.tasks)
		};
	}
	// Check if the input is likely a single task object (add more checks if needed)
	else if (
		typeof taskOrData === 'object' &&
		taskOrData !== null &&
		'id' in taskOrData &&
		'title' in taskOrData
	) {
		return processSingleTask(taskOrData);
	}
	// Check if the input is an array of tasks directly (less common but possible)
	else if (Array.isArray(taskOrData)) {
		return processArrayOfTasks(taskOrData);
	}

	// If it doesn't match known task structures, return it as is
	return taskOrData;
}
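A short sketch of `processMCPResponseData` with its default field list; the task object is made up for illustration:

```js
// Illustrative only: trimming verbose fields before the payload goes back over MCP.
import { processMCPResponseData } from './utils.js';

const task = {
	id: 5,
	title: 'Add caching layer',
	details: 'long implementation notes',
	testStrategy: 'unit tests plus a manual smoke test',
	subtasks: [{ id: 1, title: 'Pick a cache library', details: 'more notes' }]
};

const trimmed = processMCPResponseData(task);
// -> { id: 5, title: 'Add caching layer',
//      subtasks: [{ id: 1, title: 'Pick a cache library' }] }
// Pass a different fieldsToRemove array as the second argument to strip other fields.
```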
@@ -378,19 +422,20 @@ function processMCPResponseData(taskOrData, fieldsToRemove = ['details', 'testSt
 * @returns {Object} - Content response object in FastMCP format
 */
function createContentResponse(content) {
	// FastMCP requires text type, so we format objects as JSON strings
	return {
		content: [
			{
				type: 'text',
				text:
					typeof content === 'object'
						? // Format JSON nicely with indentation
							JSON.stringify(content, null, 2)
						: // Keep other content types as-is
							String(content)
			}
		]
	};
}
@@ -399,24 +444,24 @@ function createContentResponse(content) {
 * @returns {Object} - Error content response object in FastMCP format
 */
export function createErrorResponse(errorMessage) {
	return {
		content: [
			{
				type: 'text',
				text: `Error: ${errorMessage}`
			}
		],
		isError: true
	};
}

// Ensure all functions are exported
export {
	getProjectRoot,
	getProjectRootFromSession,
	handleApiResult,
	executeTaskMasterCommand,
	getCachedOrExecute,
	processMCPResponseData,
	createContentResponse
};
@@ -3,56 +3,68 @@
 * Tool for validating task dependencies
 */

import { z } from 'zod';
import {
	handleApiResult,
	createErrorResponse,
	getProjectRootFromSession
} from './utils.js';
import { validateDependenciesDirect } from '../core/task-master-core.js';

/**
 * Register the validateDependencies tool with the MCP server
 * @param {Object} server - FastMCP server instance
 */
export function registerValidateDependenciesTool(server) {
	server.addTool({
		name: 'validate_dependencies',
		description:
			'Check tasks for dependency issues (like circular references or links to non-existent tasks) without making changes.',
		parameters: z.object({
			file: z.string().optional().describe('Path to the tasks file'),
			projectRoot: z
				.string()
				.optional()
				.describe(
					'Root directory of the project (default: current working directory)'
				)
		}),
		execute: async (args, { log, session, reportProgress }) => {
			try {
				log.info(`Validating dependencies with args: ${JSON.stringify(args)}`);
				await reportProgress({ progress: 0 });

				let rootFolder = getProjectRootFromSession(session, log);

				if (!rootFolder && args.projectRoot) {
					rootFolder = args.projectRoot;
					log.info(`Using project root from args as fallback: ${rootFolder}`);
				}

				const result = await validateDependenciesDirect(
					{
						projectRoot: rootFolder,
						...args
					},
					log,
					{ reportProgress, mcpLog: log, session }
				);

				await reportProgress({ progress: 100 });

				if (result.success) {
					log.info(
						`Successfully validated dependencies: ${result.data.message}`
					);
				} else {
					log.error(`Failed to validate dependencies: ${result.error.message}`);
				}

				return handleApiResult(result, log, 'Error validating dependencies');
			} catch (error) {
				log.error(`Error in validateDependencies tool: ${error.message}`);
				return createErrorResponse(error.message);
			}
		}
	});
}
108 mcp-test.js

@@ -8,64 +8,68 @@ import fs from 'fs';
console.error(`Current working directory: ${process.cwd()}`);

try {
	console.error('Attempting to load FastMCP Config...');

	// Check if .cursor/mcp.json exists
	const mcpPath = path.join(process.cwd(), '.cursor', 'mcp.json');
	console.error(`Checking if mcp.json exists at: ${mcpPath}`);

	if (fs.existsSync(mcpPath)) {
		console.error('mcp.json file found');
		console.error(
			`File content: ${JSON.stringify(JSON.parse(fs.readFileSync(mcpPath, 'utf8')), null, 2)}`
		);
	} else {
		console.error('mcp.json file not found');
	}

	// Try to create Config
	const config = new Config();
	console.error('Config created successfully');

	// Check if env property exists
	if (config.env) {
		console.error(
			`Config.env exists with keys: ${Object.keys(config.env).join(', ')}`
		);

		// Print each env var value (careful with sensitive values)
		for (const [key, value] of Object.entries(config.env)) {
			if (key.includes('KEY')) {
				console.error(`${key}: [value hidden]`);
			} else {
				console.error(`${key}: ${value}`);
			}
		}
	} else {
		console.error('Config.env does not exist');
	}
} catch (error) {
	console.error(`Error loading Config: ${error.message}`);
	console.error(`Stack trace: ${error.stack}`);
}

// Log process.env to see if values from mcp.json were loaded automatically
console.error('\nChecking if process.env already has values from mcp.json:');
const envVars = [
	'ANTHROPIC_API_KEY',
	'PERPLEXITY_API_KEY',
	'MODEL',
	'PERPLEXITY_MODEL',
	'MAX_TOKENS',
	'TEMPERATURE',
	'DEFAULT_SUBTASKS',
	'DEFAULT_PRIORITY'
];

for (const varName of envVars) {
	if (process.env[varName]) {
		if (varName.includes('KEY')) {
			console.error(`${varName}: [value hidden]`);
		} else {
			console.error(`${varName}: ${process.env[varName]}`);
		}
	} else {
		console.error(`${varName}: not set`);
	}
}
10 output.json

@@ -1,6 +1,6 @@
{
	"key": "value",
	"nested": {
		"prop": true
	}
}
16083 package-lock.json (generated)

File diff suppressed because it is too large
191 package.json

@@ -1,96 +1,99 @@
{
	"name": "task-master-ai",
	"version": "0.10.1",
	"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
	"main": "index.js",
	"type": "module",
	"bin": {
		"task-master": "bin/task-master.js",
		"task-master-init": "bin/task-master-init.js",
		"task-master-mcp": "mcp-server/server.js",
		"task-master-mcp-server": "mcp-server/server.js"
	},
	"scripts": {
		"test": "node --experimental-vm-modules node_modules/.bin/jest",
		"test:fails": "node --experimental-vm-modules node_modules/.bin/jest --onlyFailures",
		"test:watch": "node --experimental-vm-modules node_modules/.bin/jest --watch",
		"test:coverage": "node --experimental-vm-modules node_modules/.bin/jest --coverage",
		"prepare-package": "node scripts/prepare-package.js",
		"prepublishOnly": "npm run prepare-package",
		"prepare": "chmod +x bin/task-master.js bin/task-master-init.js mcp-server/server.js",
		"changeset": "changeset",
		"release": "changeset publish",
		"inspector": "CLIENT_PORT=8888 SERVER_PORT=9000 npx @modelcontextprotocol/inspector node mcp-server/server.js",
		"mcp-server": "node mcp-server/server.js",
		"format-check": "prettier --check .",
		"format": "prettier --write ."
	},
	"keywords": [
		"claude",
		"task",
		"management",
		"ai",
		"development",
		"cursor",
		"anthropic",
		"llm",
		"mcp",
		"context"
	],
	"author": "Eyal Toledano",
	"license": "MIT WITH Commons-Clause",
	"dependencies": {
		"@anthropic-ai/sdk": "^0.39.0",
		"boxen": "^8.0.1",
		"chalk": "^4.1.2",
		"cli-table3": "^0.6.5",
		"commander": "^11.1.0",
		"cors": "^2.8.5",
		"dotenv": "^16.3.1",
		"express": "^4.21.2",
		"fastmcp": "^1.20.5",
		"figlet": "^1.8.0",
		"fuse.js": "^7.0.0",
		"gradient-string": "^3.0.0",
		"helmet": "^8.1.0",
		"inquirer": "^12.5.0",
		"jsonwebtoken": "^9.0.2",
		"lru-cache": "^10.2.0",
		"openai": "^4.89.0",
		"ora": "^8.2.0",
		"uuid": "^11.1.0"
	},
	"engines": {
		"node": ">=14.0.0"
	},
	"repository": {
		"type": "git",
		"url": "git+https://github.com/eyaltoledano/claude-task-master.git"
	},
	"homepage": "https://github.com/eyaltoledano/claude-task-master#readme",
	"bugs": {
		"url": "https://github.com/eyaltoledano/claude-task-master/issues"
	},
	"files": [
		"scripts/init.js",
		"scripts/dev.js",
		"scripts/modules/**",
		"assets/**",
		".cursor/**",
		"README-task-master.md",
		"index.js",
		"bin/**",
		"mcp-server/**"
	],
	"overrides": {
		"node-fetch": "^3.3.2",
		"whatwg-url": "^11.0.0"
	},
	"devDependencies": {
		"@changesets/changelog-github": "^0.5.1",
		"@changesets/cli": "^2.28.1",
		"@types/jest": "^29.5.14",
		"jest": "^29.7.0",
		"jest-environment-node": "^29.7.0",
		"mock-fs": "^5.5.0",
		"prettier": "^3.5.3",
		"supertest": "^7.1.0"
	}
}
@@ -21,9 +21,11 @@ In an AI-driven development process—particularly with tools like [Cursor](http
The script can be configured through environment variables in a `.env` file at the root of the project:

### Required Configuration

- `ANTHROPIC_API_KEY`: Your Anthropic API key for Claude

### Optional Configuration

- `MODEL`: Specify which Claude model to use (default: "claude-3-7-sonnet-20250219")
- `MAX_TOKENS`: Maximum tokens for model responses (default: 4000)
- `TEMPERATURE`: Temperature for model responses (default: 0.7)
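For reference, a `.env` combining these variables might look like the sketch below; every value is a placeholder, not a real credential or a recommended setting:

```bash
# Required
ANTHROPIC_API_KEY=sk-ant-xxxxxxxx

# Optional
PERPLEXITY_API_KEY=pplx-xxxxxxxx
MODEL=claude-3-7-sonnet-20250219
MAX_TOKENS=4000
TEMPERATURE=0.7
LOG_LEVEL=info
```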
@@ -38,9 +40,10 @@ The script can be configured through environment variables in a `.env` file at t

## How It Works

1. **`tasks.json`**:

   - A JSON file at the project root containing an array of tasks (each with `id`, `title`, `description`, `status`, etc.).
   - The `meta` field can store additional info like the project's name, version, or reference to the PRD.
   - Tasks can have `subtasks` for more detailed implementation steps.
   - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.

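To make that shape concrete, a minimal `tasks.json` along these lines is sketched below; the field names follow the description above, while the values and the exact top-level layout are illustrative assumptions:

```json
{
  "meta": {
    "projectName": "Your Project Name",
    "version": "1.0.0"
  },
  "tasks": [
    {
      "id": 1,
      "title": "Set up project scaffolding",
      "description": "Initialize the repository and tooling",
      "status": "pending",
      "dependencies": [],
      "priority": "medium",
      "subtasks": []
    }
  ]
}
```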
@@ -102,6 +105,7 @@ node scripts/dev.js update --file=custom-tasks.json --from=5 --prompt="Change da
```

Notes:

- The `--prompt` parameter is required and should explain the changes or new context
- Only tasks that aren't marked as 'done' will be updated
- Tasks with ID >= the specified --from value will be updated

@@ -120,6 +124,7 @@ node scripts/dev.js update-task --id=4 --prompt="Use JWT for authentication" --r
```

This command:

- Updates only the specified task rather than a range of tasks
- Provides detailed validation with helpful error messages
- Checks for required API keys when using research mode

@@ -146,6 +151,7 @@ node scripts/dev.js set-status --id=1,2,3 --status=done
```

Notes:

- When marking a parent task as "done", all of its subtasks will automatically be marked as "done" as well
- Common status values are 'done', 'pending', and 'deferred', but any string is accepted
- You can specify multiple task IDs by separating them with commas

@@ -195,6 +201,7 @@ node scripts/dev.js clear-subtasks --all
```

Notes:

- After clearing subtasks, task files are automatically regenerated
- This is useful when you want to regenerate subtasks with a different approach
- Can be combined with the `expand` command to immediately generate new subtasks

@@ -210,6 +217,7 @@ The script integrates with two AI services:
The Perplexity integration uses the OpenAI client to connect to Perplexity's API, which provides enhanced research capabilities for generating more informed subtasks. If the Perplexity API is unavailable or encounters an error, the script will automatically fall back to using Anthropic's Claude.

To use the Perplexity integration:

1. Obtain a Perplexity API key
2. Add `PERPLEXITY_API_KEY` to your `.env` file
3. Optionally specify `PERPLEXITY_MODEL` in your `.env` file (default: "sonar-medium-online")
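As a rough sketch of that flow (not the script's actual implementation), the OpenAI client can be pointed at Perplexity's API endpoint and wrapped in a fallback to Claude; the helper name, model choices, and token limit below are assumptions:

```js
import OpenAI from 'openai';
import Anthropic from '@anthropic-ai/sdk';

const perplexity = new OpenAI({
	apiKey: process.env.PERPLEXITY_API_KEY,
	baseURL: 'https://api.perplexity.ai'
});
const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

// Hypothetical helper: try Perplexity first, fall back to Claude on any error.
async function researchBackedCompletion(prompt) {
	try {
		const response = await perplexity.chat.completions.create({
			model: process.env.PERPLEXITY_MODEL || 'sonar-medium-online',
			messages: [{ role: 'user', content: prompt }]
		});
		return response.choices[0].message.content;
	} catch (error) {
		// Perplexity unavailable or errored: fall back to Anthropic's Claude.
		const message = await anthropic.messages.create({
			model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
			max_tokens: 4000,
			messages: [{ role: 'user', content: prompt }]
		});
		return message.content[0].text;
	}
}
```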
@@ -218,6 +226,7 @@ To use the Perplexity integration:
## Logging

The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:

- `debug`: Detailed information, typically useful for troubleshooting
- `info`: Confirmation that things are working as expected (default)
- `warn`: Warning messages that don't prevent execution

@@ -240,17 +249,20 @@ node scripts/dev.js remove-dependency --id=<id> --depends-on=<id>
These commands:

1. **Allow precise dependency management**:

   - Add dependencies between tasks with automatic validation
   - Remove dependencies when they're no longer needed
   - Update task files automatically after changes

2. **Include validation checks**:

   - Prevent circular dependencies (a task depending on itself)
   - Prevent duplicate dependencies
   - Verify that both tasks exist before adding/removing dependencies
   - Check if dependencies exist before attempting to remove them

3. **Provide clear feedback**:

   - Success messages confirm when dependencies are added/removed
   - Error messages explain why operations failed (if applicable)

@@ -275,6 +287,7 @@ node scripts/dev.js validate-dependencies --file=custom-tasks.json
```

This command:

- Scans all tasks and subtasks for non-existent dependencies
- Identifies potential self-dependencies (tasks referencing themselves)
- Reports all found issues without modifying files

@@ -296,6 +309,7 @@ node scripts/dev.js fix-dependencies --file=custom-tasks.json
```

This command:

1. **Validates all dependencies** across tasks and subtasks
2. **Automatically removes**:
   - References to non-existent tasks and subtasks

@@ -333,6 +347,7 @@ node scripts/dev.js analyze-complexity --research
```

Notes:

- The command uses Claude to analyze each task's complexity (or Perplexity with --research flag)
- Tasks are scored on a scale of 1-10
- Each task receives a recommended number of subtasks based on DEFAULT_SUBTASKS configuration

@@ -357,33 +372,35 @@ node scripts/dev.js expand --id=8 --num=5 --prompt="Custom prompt"
```

When a complexity report exists:

- The `expand` command will use the recommended subtask count from the report (unless overridden)
- It will use the tailored expansion prompt from the report (unless a custom prompt is provided)
- When using `--all`, tasks are sorted by complexity score (highest first)
- The `--research` flag is preserved from the complexity analysis to expansion

The output report structure is:

```json
{
  "meta": {
    "generatedAt": "2023-06-15T12:34:56.789Z",
    "tasksAnalyzed": 20,
    "thresholdScore": 5,
    "projectName": "Your Project Name",
    "usedResearch": true
  },
  "complexityAnalysis": [
    {
      "taskId": 8,
      "taskTitle": "Develop Implementation Drift Handling",
      "complexityScore": 9.5,
      "recommendedSubtasks": 6,
      "expansionPrompt": "Create subtasks that handle detecting...",
      "reasoning": "This task requires sophisticated logic...",
      "expansionCommand": "node scripts/dev.js expand --id=8 --num=6 --prompt=\"Create subtasks...\" --research"
    }
    // More tasks sorted by complexity score (highest first)
  ]
}
```

@@ -457,16 +474,19 @@ This command is particularly useful when you need to examine a specific task in
The script now includes improved error handling throughout all commands:

1. **Detailed Validation**:

   - Required parameters (like task IDs and prompts) are validated early
   - File existence is checked with customized errors for common scenarios
   - Parameter type conversion is handled with clear error messages

2. **Contextual Error Messages**:

   - Task not found errors include suggestions to run the list command
   - API key errors include reminders to check environment variables
   - Invalid ID format errors show the expected format

3. **Command-Specific Help Displays**:

   - When validation fails, detailed help for the specific command is shown
   - Help displays include usage examples and parameter descriptions
   - Formatted in clear, color-coded boxes with examples

@@ -481,11 +501,13 @@ The script now includes improved error handling throughout all commands:
The script now automatically checks for updates without slowing down execution:

1. **Background Version Checking**:

   - Non-blocking version checks run in the background while commands execute
   - Actual command execution isn't delayed by version checking
   - Update notifications appear after command completion

2. **Update Notifications**:

   - When a newer version is available, a notification is displayed
   - Notifications include current version, latest version, and update command
   - Formatted in an attention-grabbing box with clear instructions

@@ -516,6 +538,7 @@ node scripts/dev.js add-subtask --parent=5 --title="Login API route" --skip-gene
```

Key features:

- Create new subtasks with detailed properties or convert existing tasks
- Define dependencies between subtasks
- Set custom status for new subtasks

@@ -538,7 +561,8 @@ node scripts/dev.js remove-subtask --id=5.2 --skip-generate
```

Key features:

- Remove subtasks individually or in batches
- Optionally convert subtasks to standalone tasks
- Control whether task files are regenerated
- Provides detailed success messages and next steps
@@ -3,17 +3,17 @@
/**
 * dev.js
 * Task Master CLI - AI-driven development task management
 *
 * This is the refactored entry point that uses the modular architecture.
 * It imports functionality from the modules directory and provides a CLI.
 */

// Add at the very beginning of the file
if (process.env.DEBUG === '1') {
	console.error('DEBUG - dev.js received args:', process.argv.slice(2));
}

import { runCLI } from './modules/commands.js';

// Run the CLI with the process arguments
runCLI(process.argv);
1636	scripts/init.js
File diff suppressed because it is too large
@@ -8,4 +8,4 @@ export * from './utils.js';
export * from './ui.js';
export * from './ai-services.js';
export * from './task-manager.js';
export * from './commands.js';
File diff suppressed because it is too large
@@ -9,15 +9,15 @@ import chalk from 'chalk';

// Configuration and constants
const CONFIG = {
	model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
	maxTokens: parseInt(process.env.MAX_TOKENS || '4000'),
	temperature: parseFloat(process.env.TEMPERATURE || '0.7'),
	debug: process.env.DEBUG === 'true',
	logLevel: process.env.LOG_LEVEL || 'info',
	defaultSubtasks: parseInt(process.env.DEFAULT_SUBTASKS || '3'),
	defaultPriority: process.env.DEFAULT_PRIORITY || 'medium',
	projectName: process.env.PROJECT_NAME || 'Task Master',
	projectVersion: '1.5.0' // Hardcoded version - ALWAYS use this value, ignore environment variable
};

// Global silent mode flag
@@ -25,25 +25,25 @@ let silentMode = false;

// Set up logging based on log level
const LOG_LEVELS = {
	debug: 0,
	info: 1,
	warn: 2,
	error: 3,
	success: 1 // Treat success like info level
};

/**
 * Enable silent logging mode
 */
function enableSilentMode() {
	silentMode = true;
}

/**
 * Disable silent logging mode
 */
function disableSilentMode() {
	silentMode = false;
}

/**
@@ -51,7 +51,7 @@ function disableSilentMode() {
 * @returns {boolean} True if silent mode is enabled
 */
function isSilentMode() {
	return silentMode;
}

/**
@@ -60,32 +60,36 @@ function isSilentMode() {
 * @param {...any} args - Arguments to log
 */
function log(level, ...args) {
	// Immediately return if silentMode is enabled
	if (silentMode) {
		return;
	}

	// Use text prefixes instead of emojis
	const prefixes = {
		debug: chalk.gray('[DEBUG]'),
		info: chalk.blue('[INFO]'),
		warn: chalk.yellow('[WARN]'),
		error: chalk.red('[ERROR]'),
		success: chalk.green('[SUCCESS]')
	};

	// Ensure level exists, default to info if not
	const currentLevel = LOG_LEVELS.hasOwnProperty(level) ? level : 'info';
	const configLevel = CONFIG.logLevel || 'info'; // Ensure configLevel has a default

	// Check log level configuration
	if (
		LOG_LEVELS[currentLevel] >= (LOG_LEVELS[configLevel] ?? LOG_LEVELS.info)
	) {
		const prefix = prefixes[currentLevel] || '';
		// Use console.log for all levels, let chalk handle coloring
		// Construct the message properly
		const message = args
			.map((arg) => (typeof arg === 'object' ? JSON.stringify(arg) : arg))
			.join(' ');
		console.log(`${prefix} ${message}`);
	}
}

/**
@@ -94,17 +98,17 @@ function log(level, ...args) {
 * @returns {Object|null} Parsed JSON data or null if error occurs
 */
function readJSON(filepath) {
	try {
		const rawData = fs.readFileSync(filepath, 'utf8');
		return JSON.parse(rawData);
	} catch (error) {
		log('error', `Error reading JSON file ${filepath}:`, error.message);
		if (CONFIG.debug) {
			// Use log utility for debug output too
			log('error', 'Full error details:', error);
		}
		return null;
	}
}

/**
@@ -113,19 +117,19 @@ function readJSON(filepath) {
 * @param {Object} data - Data to write
 */
function writeJSON(filepath, data) {
	try {
		const dir = path.dirname(filepath);
		if (!fs.existsSync(dir)) {
			fs.mkdirSync(dir, { recursive: true });
		}
		fs.writeFileSync(filepath, JSON.stringify(data, null, 2), 'utf8');
	} catch (error) {
		log('error', `Error writing JSON file ${filepath}:`, error.message);
		if (CONFIG.debug) {
			// Use log utility for debug output too
			log('error', 'Full error details:', error);
		}
	}
}

/**
@@ -134,8 +138,8 @@ function writeJSON(filepath, data) {
 * @returns {string} Sanitized prompt
 */
function sanitizePrompt(prompt) {
	// Replace double quotes with escaped double quotes
	return prompt.replace(/"/g, '\\"');
}

/**
@@ -144,18 +148,20 @@ function sanitizePrompt(prompt) {
 * @returns {Object|null} The parsed complexity report or null if not found
 */
function readComplexityReport(customPath = null) {
	try {
		const reportPath =
			customPath ||
			path.join(process.cwd(), 'scripts', 'task-complexity-report.json');
		if (!fs.existsSync(reportPath)) {
			return null;
		}

		const reportData = fs.readFileSync(reportPath, 'utf8');
		return JSON.parse(reportData);
	} catch (error) {
		log('warn', `Could not read complexity report: ${error.message}`);
		return null;
	}
}

/**
@@ -165,11 +171,15 @@ function readComplexityReport(customPath = null) {
 * @returns {Object|null} The task analysis or null if not found
 */
function findTaskInComplexityReport(report, taskId) {
	if (
		!report ||
		!report.complexityAnalysis ||
		!Array.isArray(report.complexityAnalysis)
	) {
		return null;
	}

	return report.complexityAnalysis.find((task) => task.taskId === taskId);
}

/**
@@ -179,24 +189,26 @@ function findTaskInComplexityReport(report, taskId) {
 * @returns {boolean} True if the task exists, false otherwise
 */
function taskExists(tasks, taskId) {
	if (!taskId || !tasks || !Array.isArray(tasks)) {
		return false;
	}

	// Handle both regular task IDs and subtask IDs (e.g., "1.2")
	if (typeof taskId === 'string' && taskId.includes('.')) {
		const [parentId, subtaskId] = taskId
			.split('.')
			.map((id) => parseInt(id, 10));
		const parentTask = tasks.find((t) => t.id === parentId);

		if (!parentTask || !parentTask.subtasks) {
			return false;
		}

		return parentTask.subtasks.some((st) => st.id === subtaskId);
	}

	const id = parseInt(taskId, 10);
	return tasks.some((t) => t.id === id);
}

/**
@@ -205,15 +217,15 @@ function taskExists(tasks, taskId) {
 * @returns {string} The formatted task ID
 */
function formatTaskId(id) {
	if (typeof id === 'string' && id.includes('.')) {
		return id; // Already formatted as a string with a dot (e.g., "1.2")
	}

	if (typeof id === 'number') {
		return id.toString();
	}

	return id;
}

/**
@@ -223,35 +235,37 @@ function formatTaskId(id) {
 * @returns {Object|null} The task object or null if not found
 */
function findTaskById(tasks, taskId) {
	if (!taskId || !tasks || !Array.isArray(tasks)) {
		return null;
	}

	// Check if it's a subtask ID (e.g., "1.2")
	if (typeof taskId === 'string' && taskId.includes('.')) {
		const [parentId, subtaskId] = taskId
			.split('.')
			.map((id) => parseInt(id, 10));
		const parentTask = tasks.find((t) => t.id === parentId);

		if (!parentTask || !parentTask.subtasks) {
			return null;
		}

		const subtask = parentTask.subtasks.find((st) => st.id === subtaskId);
		if (subtask) {
			// Add reference to parent task for context
			subtask.parentTask = {
				id: parentTask.id,
				title: parentTask.title,
				status: parentTask.status
			};
			subtask.isSubtask = true;
		}

		return subtask || null;
	}

	const id = parseInt(taskId, 10);
	return tasks.find((t) => t.id === id) || null;
}

/**
@@ -261,11 +275,11 @@ function findTaskById(tasks, taskId) {
 * @returns {string} The truncated text
 */
function truncate(text, maxLength) {
	if (!text || text.length <= maxLength) {
		return text;
	}

	return text.slice(0, maxLength - 3) + '...';
}

/**
@@ -276,39 +290,47 @@ function truncate(text, maxLength) {
 * @param {Set} recursionStack - Set of nodes in current recursion stack
 * @returns {Array} - List of dependency edges that need to be removed to break cycles
 */
function findCycles(
	subtaskId,
	dependencyMap,
	visited = new Set(),
	recursionStack = new Set(),
	path = []
) {
	// Mark the current node as visited and part of recursion stack
	visited.add(subtaskId);
	recursionStack.add(subtaskId);
	path.push(subtaskId);

	const cyclesToBreak = [];

	// Get all dependencies of the current subtask
	const dependencies = dependencyMap.get(subtaskId) || [];

	// For each dependency
	for (const depId of dependencies) {
		// If not visited, recursively check for cycles
		if (!visited.has(depId)) {
			const cycles = findCycles(depId, dependencyMap, visited, recursionStack, [
				...path
			]);
			cyclesToBreak.push(...cycles);
		}
		// If the dependency is in the recursion stack, we found a cycle
		else if (recursionStack.has(depId)) {
			// Find the position of the dependency in the path
			const cycleStartIndex = path.indexOf(depId);
			// The last edge in the cycle is what we want to remove
			const cycleEdges = path.slice(cycleStartIndex);
			// We'll remove the last edge in the cycle (the one that points back)
			cyclesToBreak.push(depId);
		}
	}

	// Remove the node from recursion stack before returning
	recursionStack.delete(subtaskId);

	return cyclesToBreak;
}

/**
@@ -317,23 +339,23 @@ function findCycles(subtaskId, dependencyMap, visited = new Set(), recursionStac
 * @returns {string} The kebab-case version of the string
 */
const toKebabCase = (str) => {
	// Special handling for common acronyms
	const withReplacedAcronyms = str
		.replace(/ID/g, 'Id')
		.replace(/API/g, 'Api')
		.replace(/UI/g, 'Ui')
		.replace(/URL/g, 'Url')
		.replace(/URI/g, 'Uri')
		.replace(/JSON/g, 'Json')
		.replace(/XML/g, 'Xml')
		.replace(/HTML/g, 'Html')
		.replace(/CSS/g, 'Css');

	// Insert hyphens before capital letters and convert to lowercase
	return withReplacedAcronyms
		.replace(/([A-Z])/g, '-$1')
		.toLowerCase()
		.replace(/^-/, ''); // Remove leading hyphen if present
};

/**
@@ -342,49 +364,49 @@ const toKebabCase = (str) => {
 * @returns {Array<{original: string, kebabCase: string}>} - List of flags that should be converted
 */
function detectCamelCaseFlags(args) {
	const camelCaseFlags = [];
	for (const arg of args) {
		if (arg.startsWith('--')) {
			const flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =

			// Skip single-word flags - they can't be camelCase
			if (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {
				continue;
			}

			// Check for camelCase pattern (lowercase followed by uppercase)
			if (/[a-z][A-Z]/.test(flagName)) {
				const kebabVersion = toKebabCase(flagName);
				if (kebabVersion !== flagName) {
					camelCaseFlags.push({
						original: flagName,
						kebabCase: kebabVersion
					});
				}
			}
		}
	}
	return camelCaseFlags;
}

// Export all utility functions and configuration
export {
	CONFIG,
	LOG_LEVELS,
	log,
	readJSON,
	writeJSON,
	sanitizePrompt,
	readComplexityReport,
	findTaskInComplexityReport,
	taskExists,
	formatTaskId,
	findTaskById,
	truncate,
	findCycles,
	toKebabCase,
	detectCamelCaseFlags,
	enableSilentMode,
	disableSilentMode,
	isSilentMode
};
@@ -3,7 +3,7 @@
/**
 * This script prepares the package for publication to NPM.
 * It ensures all necessary files are included and properly configured.
 *
 * Additional options:
 * --patch: Increment patch version (default)
 * --minor: Increment minor version
@@ -22,176 +22,190 @@ const __dirname = dirname(__filename);

// Define colors for console output
const COLORS = {
	reset: '\x1b[0m',
	bright: '\x1b[1m',
	dim: '\x1b[2m',
	red: '\x1b[31m',
	green: '\x1b[32m',
	yellow: '\x1b[33m',
	blue: '\x1b[34m',
	magenta: '\x1b[35m',
	cyan: '\x1b[36m'
};

// Parse command line arguments
const args = process.argv.slice(2);
const versionBump = args.includes('--major')
	? 'major'
	: args.includes('--minor')
		? 'minor'
		: 'patch';

// Check for explicit version
const versionArg = args.find((arg) => arg.startsWith('--version='));
const explicitVersion = versionArg ? versionArg.split('=')[1] : null;

// Log function with color support
function log(level, ...args) {
	const prefix = {
		info: `${COLORS.blue}[INFO]${COLORS.reset}`,
		warn: `${COLORS.yellow}[WARN]${COLORS.reset}`,
		error: `${COLORS.red}[ERROR]${COLORS.reset}`,
		success: `${COLORS.green}[SUCCESS]${COLORS.reset}`
	}[level.toLowerCase()];

	console.log(prefix, ...args);
}

// Function to check if a file exists
function fileExists(filePath) {
	return fs.existsSync(filePath);
}

// Function to ensure a file is executable
function ensureExecutable(filePath) {
	try {
		fs.chmodSync(filePath, '755');
		log('info', `Made ${filePath} executable`);
	} catch (error) {
		log('error', `Failed to make ${filePath} executable:`, error.message);
		return false;
	}
	return true;
}

// Function to sync template files
function syncTemplateFiles() {
	// We no longer need to sync files since we're using them directly
	log(
		'info',
		'Template syncing has been deprecated - using source files directly'
	);
	return true;
}

// Function to increment version
function incrementVersion(currentVersion, type = 'patch') {
	const [major, minor, patch] = currentVersion.split('.').map(Number);

	switch (type) {
		case 'major':
			return `${major + 1}.0.0`;
		case 'minor':
			return `${major}.${minor + 1}.0`;
		case 'patch':
		default:
			return `${major}.${minor}.${patch + 1}`;
	}
}

// Main function to prepare the package
function preparePackage() {
	const rootDir = path.join(__dirname, '..');
	log('info', `Preparing package in ${rootDir}`);

	// Update version in package.json
	const packageJsonPath = path.join(rootDir, 'package.json');
	const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
	const currentVersion = packageJson.version;

	let newVersion;
	if (explicitVersion) {
		newVersion = explicitVersion;
		log(
			'info',
			`Setting version to specified ${newVersion} (was ${currentVersion})`
		);
	} else {
		newVersion = incrementVersion(currentVersion, versionBump);
		log(
			'info',
			`Incrementing ${versionBump} version to ${newVersion} (was ${currentVersion})`
		);
	}

	packageJson.version = newVersion;
	fs.writeFileSync(packageJsonPath, JSON.stringify(packageJson, null, 2));
	log('success', `Updated package.json version to ${newVersion}`);

	// Check for required files
	const requiredFiles = [
		'package.json',
		'README-task-master.md',
		'index.js',
		'scripts/init.js',
		'scripts/dev.js',
		'assets/env.example',
		'assets/gitignore',
		'assets/example_prd.txt',
		'assets/scripts_README.md',
		'.cursor/rules/dev_workflow.mdc',
		'.cursor/rules/taskmaster.mdc',
		'.cursor/rules/cursor_rules.mdc',
		'.cursor/rules/self_improve.mdc'
	];

	let allFilesExist = true;
	for (const file of requiredFiles) {
		const filePath = path.join(rootDir, file);
		if (!fileExists(filePath)) {
			log('error', `Required file ${file} does not exist`);
			allFilesExist = false;
		}
	}

	if (!allFilesExist) {
		log(
			'error',
			'Some required files are missing. Package preparation failed.'
		);
		process.exit(1);
	}

	// Ensure scripts are executable
	const executableScripts = ['scripts/init.js', 'scripts/dev.js'];

	let allScriptsExecutable = true;
	for (const script of executableScripts) {
		const scriptPath = path.join(rootDir, script);
		if (!ensureExecutable(scriptPath)) {
			allScriptsExecutable = false;
		}
	}

	if (!allScriptsExecutable) {
		log(
			'warn',
			'Some scripts could not be made executable. This may cause issues.'
		);
	}

	// Run npm pack to test package creation
	try {
		log('info', 'Running npm pack to test package creation...');
		const output = execSync('npm pack --dry-run', { cwd: rootDir }).toString();
		log('info', output);
	} catch (error) {
		log('error', 'Failed to run npm pack:', error.message);
		process.exit(1);
	}

	// Make scripts executable
	log('info', 'Making scripts executable...');
	try {
		execSync('chmod +x scripts/init.js', { stdio: 'ignore' });
		log('info', 'Made scripts/init.js executable');
		execSync('chmod +x scripts/dev.js', { stdio: 'ignore' });
		log('info', 'Made scripts/dev.js executable');
	} catch (error) {
		log('error', 'Failed to make scripts executable:', error.message);
	}

	log('success', `Package preparation completed successfully! 🎉`);
	log('success', `Version updated to ${newVersion}`);
	log('info', 'You can now publish the package with:');
	log('info', '  npm publish');
}

// Run the preparation
preparePackage();
@@ -1,203 +1,203 @@
{
	"meta": {
		"generatedAt": "2025-03-24T20:01:35.986Z",
		"tasksAnalyzed": 24,
		"thresholdScore": 5,
		"projectName": "Your Project Name",
		"usedResearch": false
	},
	"complexityAnalysis": [
		{
			"taskId": 1,
			"taskTitle": "Implement Task Data Structure",
			"complexityScore": 7,
			"recommendedSubtasks": 5,
			"expansionPrompt": "Break down the implementation of the core tasks.json data structure into subtasks that cover schema design, model implementation, validation, file operations, and error handling. For each subtask, include specific technical requirements and acceptance criteria.",
			"reasoning": "This task requires designing a foundational data structure that will be used throughout the system. It involves schema design, validation logic, and file system operations, which together represent moderate to high complexity. The task is critical as many other tasks depend on it."
		},
		{
			"taskId": 2,
			"taskTitle": "Develop Command Line Interface Foundation",
			"complexityScore": 6,
			"recommendedSubtasks": 4,
			"expansionPrompt": "Divide the CLI foundation implementation into subtasks covering Commander.js setup, help documentation creation, console output formatting, and global options handling. Each subtask should specify implementation details and how it integrates with the overall CLI structure.",
			"reasoning": "Setting up the CLI foundation requires integrating Commander.js, implementing various command-line options, and establishing the output formatting system. The complexity is moderate as it involves creating the interface layer that users will interact with."
		},
		{
			"taskId": 3,
			"taskTitle": "Implement Basic Task Operations",
			"complexityScore": 8,
			"recommendedSubtasks": 5,
			"expansionPrompt": "Break down the implementation of basic task operations into subtasks covering CRUD operations, status management, dependency handling, and priority management. Each subtask should detail the specific operations, validation requirements, and error cases to handle.",
			"reasoning": "This task encompasses multiple operations (create, read, update, delete) along with status changes, dependency management, and priority handling. It represents high complexity due to the breadth of functionality and the need to ensure data integrity across operations."
		},
		{
			"taskId": 4,
			"taskTitle": "Create Task File Generation System",
			"complexityScore": 7,
			"recommendedSubtasks": 4,
			"expansionPrompt": "Divide the task file generation system into subtasks covering template creation, file generation logic, bi-directional synchronization, and file organization. Each subtask should specify the technical approach, edge cases to handle, and integration points with the task data structure.",
			"reasoning": "Implementing file generation with bi-directional synchronization presents significant complexity due to the need to maintain consistency between individual files and the central tasks.json. The system must handle updates in either direction and resolve potential conflicts."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 5,
|
"taskId": 5,
|
||||||
"taskTitle": "Integrate Anthropic Claude API",
|
"taskTitle": "Integrate Anthropic Claude API",
|
||||||
"complexityScore": 6,
|
"complexityScore": 6,
|
||||||
"recommendedSubtasks": 4,
|
"recommendedSubtasks": 4,
|
||||||
"expansionPrompt": "Break down the Claude API integration into subtasks covering authentication setup, prompt template creation, response handling, and error management with retries. Each subtask should detail the specific implementation approach, including security considerations and performance optimizations.",
|
"expansionPrompt": "Break down the Claude API integration into subtasks covering authentication setup, prompt template creation, response handling, and error management with retries. Each subtask should detail the specific implementation approach, including security considerations and performance optimizations.",
|
||||||
"reasoning": "Integrating with the Claude API involves setting up authentication, creating effective prompts, and handling responses and errors. The complexity is moderate, focusing on establishing a reliable connection to the external service with proper error handling and retry logic."
|
"reasoning": "Integrating with the Claude API involves setting up authentication, creating effective prompts, and handling responses and errors. The complexity is moderate, focusing on establishing a reliable connection to the external service with proper error handling and retry logic."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 6,
|
"taskId": 6,
|
||||||
"taskTitle": "Build PRD Parsing System",
|
"taskTitle": "Build PRD Parsing System",
|
||||||
"complexityScore": 8,
|
"complexityScore": 8,
|
||||||
"recommendedSubtasks": 5,
|
"recommendedSubtasks": 5,
|
||||||
"expansionPrompt": "Divide the PRD parsing system into subtasks covering file reading, prompt engineering, content-to-task conversion, dependency inference, priority assignment, and handling large documents. Each subtask should specify the AI interaction approach, data transformation steps, and validation requirements.",
|
"expansionPrompt": "Divide the PRD parsing system into subtasks covering file reading, prompt engineering, content-to-task conversion, dependency inference, priority assignment, and handling large documents. Each subtask should specify the AI interaction approach, data transformation steps, and validation requirements.",
|
||||||
"reasoning": "Parsing PRDs into structured tasks requires sophisticated prompt engineering and intelligent processing of unstructured text. The complexity is high due to the need to accurately extract tasks, infer dependencies, and handle potentially large documents with varying formats."
|
"reasoning": "Parsing PRDs into structured tasks requires sophisticated prompt engineering and intelligent processing of unstructured text. The complexity is high due to the need to accurately extract tasks, infer dependencies, and handle potentially large documents with varying formats."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 7,
|
"taskId": 7,
|
||||||
"taskTitle": "Implement Task Expansion with Claude",
|
"taskTitle": "Implement Task Expansion with Claude",
|
||||||
"complexityScore": 7,
|
"complexityScore": 7,
|
||||||
"recommendedSubtasks": 4,
|
"recommendedSubtasks": 4,
|
||||||
"expansionPrompt": "Break down the task expansion functionality into subtasks covering prompt creation for subtask generation, expansion workflow implementation, parent-child relationship management, and regeneration mechanisms. Each subtask should detail the AI interaction patterns, data structures, and user experience considerations.",
|
"expansionPrompt": "Break down the task expansion functionality into subtasks covering prompt creation for subtask generation, expansion workflow implementation, parent-child relationship management, and regeneration mechanisms. Each subtask should detail the AI interaction patterns, data structures, and user experience considerations.",
|
||||||
"reasoning": "Task expansion involves complex AI interactions to generate meaningful subtasks and manage their relationships with parent tasks. The complexity comes from creating effective prompts that produce useful subtasks and implementing a smooth workflow for users to generate and refine these subtasks."
|
"reasoning": "Task expansion involves complex AI interactions to generate meaningful subtasks and manage their relationships with parent tasks. The complexity comes from creating effective prompts that produce useful subtasks and implementing a smooth workflow for users to generate and refine these subtasks."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 8,
|
"taskId": 8,
|
||||||
"taskTitle": "Develop Implementation Drift Handling",
|
"taskTitle": "Develop Implementation Drift Handling",
|
||||||
"complexityScore": 9,
|
"complexityScore": 9,
|
||||||
"recommendedSubtasks": 5,
|
"recommendedSubtasks": 5,
|
||||||
"expansionPrompt": "Divide the implementation drift handling into subtasks covering change detection, task rewriting based on new context, dependency chain updates, work preservation, and update suggestion analysis. Each subtask should specify the algorithms, heuristics, and AI prompts needed to effectively manage implementation changes.",
|
"expansionPrompt": "Divide the implementation drift handling into subtasks covering change detection, task rewriting based on new context, dependency chain updates, work preservation, and update suggestion analysis. Each subtask should specify the algorithms, heuristics, and AI prompts needed to effectively manage implementation changes.",
|
||||||
"reasoning": "This task involves the complex challenge of updating future tasks based on changes in implementation. It requires sophisticated analysis of completed work, understanding how it affects pending tasks, and intelligently updating those tasks while preserving dependencies. This represents high complexity due to the need for context-aware AI reasoning."
|
"reasoning": "This task involves the complex challenge of updating future tasks based on changes in implementation. It requires sophisticated analysis of completed work, understanding how it affects pending tasks, and intelligently updating those tasks while preserving dependencies. This represents high complexity due to the need for context-aware AI reasoning."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 9,
|
"taskId": 9,
|
||||||
"taskTitle": "Integrate Perplexity API",
|
"taskTitle": "Integrate Perplexity API",
|
||||||
"complexityScore": 5,
|
"complexityScore": 5,
|
||||||
"recommendedSubtasks": 3,
|
"recommendedSubtasks": 3,
|
||||||
"expansionPrompt": "Break down the Perplexity API integration into subtasks covering authentication setup, research-oriented prompt creation, response handling, and fallback mechanisms. Each subtask should detail the implementation approach, integration with existing systems, and quality comparison metrics.",
|
"expansionPrompt": "Break down the Perplexity API integration into subtasks covering authentication setup, research-oriented prompt creation, response handling, and fallback mechanisms. Each subtask should detail the implementation approach, integration with existing systems, and quality comparison metrics.",
|
||||||
"reasoning": "Similar to the Claude integration but slightly less complex, this task focuses on connecting to the Perplexity API for research capabilities. The complexity is moderate, involving API authentication, prompt templates, and response handling with fallback mechanisms to Claude."
|
"reasoning": "Similar to the Claude integration but slightly less complex, this task focuses on connecting to the Perplexity API for research capabilities. The complexity is moderate, involving API authentication, prompt templates, and response handling with fallback mechanisms to Claude."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 10,
|
"taskId": 10,
|
||||||
"taskTitle": "Create Research-Backed Subtask Generation",
|
"taskTitle": "Create Research-Backed Subtask Generation",
|
||||||
"complexityScore": 7,
|
"complexityScore": 7,
|
||||||
"recommendedSubtasks": 4,
|
"recommendedSubtasks": 4,
|
||||||
"expansionPrompt": "Divide the research-backed subtask generation into subtasks covering domain-specific prompt creation, context enrichment from research, knowledge incorporation, and detailed subtask generation. Each subtask should specify the approach for leveraging research data and integrating it into the generation process.",
|
"expansionPrompt": "Divide the research-backed subtask generation into subtasks covering domain-specific prompt creation, context enrichment from research, knowledge incorporation, and detailed subtask generation. Each subtask should specify the approach for leveraging research data and integrating it into the generation process.",
|
||||||
"reasoning": "This task builds on previous work to enhance subtask generation with research capabilities. The complexity comes from effectively incorporating research results into the generation process and creating domain-specific prompts that produce high-quality, detailed subtasks with best practices."
|
"reasoning": "This task builds on previous work to enhance subtask generation with research capabilities. The complexity comes from effectively incorporating research results into the generation process and creating domain-specific prompts that produce high-quality, detailed subtasks with best practices."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 11,
|
"taskId": 11,
|
||||||
"taskTitle": "Implement Batch Operations",
|
"taskTitle": "Implement Batch Operations",
|
||||||
"complexityScore": 6,
|
"complexityScore": 6,
|
||||||
"recommendedSubtasks": 4,
|
"recommendedSubtasks": 4,
|
||||||
"expansionPrompt": "Break down the batch operations functionality into subtasks covering multi-task status updates, bulk subtask generation, task filtering/querying, and batch prioritization. Each subtask should detail the command interface, implementation approach, and performance considerations for handling multiple tasks.",
|
"expansionPrompt": "Break down the batch operations functionality into subtasks covering multi-task status updates, bulk subtask generation, task filtering/querying, and batch prioritization. Each subtask should detail the command interface, implementation approach, and performance considerations for handling multiple tasks.",
|
||||||
"reasoning": "Implementing batch operations requires extending existing functionality to work with multiple tasks simultaneously. The complexity is moderate, focusing on efficient processing of task sets, filtering capabilities, and maintaining data consistency across bulk operations."
|
"reasoning": "Implementing batch operations requires extending existing functionality to work with multiple tasks simultaneously. The complexity is moderate, focusing on efficient processing of task sets, filtering capabilities, and maintaining data consistency across bulk operations."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 12,
|
"taskId": 12,
|
||||||
"taskTitle": "Develop Project Initialization System",
|
"taskTitle": "Develop Project Initialization System",
|
||||||
"complexityScore": 6,
|
"complexityScore": 6,
|
||||||
"recommendedSubtasks": 4,
|
"recommendedSubtasks": 4,
|
||||||
"expansionPrompt": "Divide the project initialization system into subtasks covering project templating, interactive setup wizard, environment configuration, directory structure creation, and example generation. Each subtask should specify the user interaction flow, template design, and integration with existing components.",
|
"expansionPrompt": "Divide the project initialization system into subtasks covering project templating, interactive setup wizard, environment configuration, directory structure creation, and example generation. Each subtask should specify the user interaction flow, template design, and integration with existing components.",
|
||||||
"reasoning": "Creating a project initialization system involves setting up templates, an interactive wizard, and generating initial files and directories. The complexity is moderate, focusing on providing a smooth setup experience for new projects with appropriate defaults and configuration."
|
"reasoning": "Creating a project initialization system involves setting up templates, an interactive wizard, and generating initial files and directories. The complexity is moderate, focusing on providing a smooth setup experience for new projects with appropriate defaults and configuration."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 13,
|
"taskId": 13,
|
||||||
"taskTitle": "Create Cursor Rules Implementation",
|
"taskTitle": "Create Cursor Rules Implementation",
|
||||||
"complexityScore": 5,
|
"complexityScore": 5,
|
||||||
"recommendedSubtasks": 3,
|
"recommendedSubtasks": 3,
|
||||||
"expansionPrompt": "Break down the Cursor rules implementation into subtasks covering documentation creation (dev_workflow.mdc, cursor_rules.mdc, self_improve.mdc), directory structure setup, and integration documentation. Each subtask should detail the specific content to include and how it enables effective AI interaction.",
|
"expansionPrompt": "Break down the Cursor rules implementation into subtasks covering documentation creation (dev_workflow.mdc, cursor_rules.mdc, self_improve.mdc), directory structure setup, and integration documentation. Each subtask should detail the specific content to include and how it enables effective AI interaction.",
|
||||||
"reasoning": "This task focuses on creating documentation and rules for Cursor AI integration. The complexity is moderate, involving the creation of structured documentation files that define how AI should interact with the system and setting up the appropriate directory structure."
|
"reasoning": "This task focuses on creating documentation and rules for Cursor AI integration. The complexity is moderate, involving the creation of structured documentation files that define how AI should interact with the system and setting up the appropriate directory structure."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 14,
|
"taskId": 14,
|
||||||
"taskTitle": "Develop Agent Workflow Guidelines",
|
"taskTitle": "Develop Agent Workflow Guidelines",
|
||||||
"complexityScore": 5,
|
"complexityScore": 5,
|
||||||
"recommendedSubtasks": 3,
|
"recommendedSubtasks": 3,
|
||||||
"expansionPrompt": "Divide the agent workflow guidelines into subtasks covering task discovery documentation, selection guidelines, implementation guidance, verification procedures, and prioritization rules. Each subtask should specify the specific guidance to provide and how it enables effective agent workflows.",
|
"expansionPrompt": "Divide the agent workflow guidelines into subtasks covering task discovery documentation, selection guidelines, implementation guidance, verification procedures, and prioritization rules. Each subtask should specify the specific guidance to provide and how it enables effective agent workflows.",
|
||||||
"reasoning": "Creating comprehensive guidelines for AI agents involves documenting workflows, selection criteria, and implementation guidance. The complexity is moderate, focusing on clear documentation that helps agents interact effectively with the task system."
|
"reasoning": "Creating comprehensive guidelines for AI agents involves documenting workflows, selection criteria, and implementation guidance. The complexity is moderate, focusing on clear documentation that helps agents interact effectively with the task system."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 15,
|
"taskId": 15,
|
||||||
"taskTitle": "Optimize Agent Integration with Cursor and dev.js Commands",
|
"taskTitle": "Optimize Agent Integration with Cursor and dev.js Commands",
|
||||||
"complexityScore": 6,
|
"complexityScore": 6,
|
||||||
"recommendedSubtasks": 4,
|
"recommendedSubtasks": 4,
|
||||||
"expansionPrompt": "Break down the agent integration optimization into subtasks covering existing pattern documentation, Cursor-dev.js command integration enhancement, workflow documentation improvement, and feature additions. Each subtask should specify the specific improvements to make and how they enhance agent interaction.",
|
"expansionPrompt": "Break down the agent integration optimization into subtasks covering existing pattern documentation, Cursor-dev.js command integration enhancement, workflow documentation improvement, and feature additions. Each subtask should specify the specific improvements to make and how they enhance agent interaction.",
|
||||||
"reasoning": "This task involves enhancing and documenting existing agent interaction patterns with Cursor and dev.js commands. The complexity is moderate, focusing on improving integration between different components and ensuring agents can effectively utilize the system's capabilities."
|
"reasoning": "This task involves enhancing and documenting existing agent interaction patterns with Cursor and dev.js commands. The complexity is moderate, focusing on improving integration between different components and ensuring agents can effectively utilize the system's capabilities."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 16,
|
"taskId": 16,
|
||||||
"taskTitle": "Create Configuration Management System",
|
"taskTitle": "Create Configuration Management System",
|
||||||
"complexityScore": 6,
|
"complexityScore": 6,
|
||||||
"recommendedSubtasks": 4,
|
"recommendedSubtasks": 4,
|
||||||
"expansionPrompt": "Divide the configuration management system into subtasks covering environment variable handling, .env file support, configuration validation, defaults with overrides, and secure API key handling. Each subtask should specify the implementation approach, security considerations, and user experience for configuration.",
|
"expansionPrompt": "Divide the configuration management system into subtasks covering environment variable handling, .env file support, configuration validation, defaults with overrides, and secure API key handling. Each subtask should specify the implementation approach, security considerations, and user experience for configuration.",
|
||||||
"reasoning": "Implementing robust configuration management involves handling environment variables, .env files, validation, and secure storage of sensitive information. The complexity is moderate, focusing on creating a flexible system that works across different environments with appropriate security measures."
|
"reasoning": "Implementing robust configuration management involves handling environment variables, .env files, validation, and secure storage of sensitive information. The complexity is moderate, focusing on creating a flexible system that works across different environments with appropriate security measures."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 17,
|
"taskId": 17,
|
||||||
"taskTitle": "Implement Comprehensive Logging System",
|
"taskTitle": "Implement Comprehensive Logging System",
|
||||||
"complexityScore": 5,
|
"complexityScore": 5,
|
||||||
"recommendedSubtasks": 3,
|
"recommendedSubtasks": 3,
|
||||||
"expansionPrompt": "Break down the logging system implementation into subtasks covering log level configuration, output destination management, specialized logging (commands, APIs, errors), and performance metrics. Each subtask should detail the implementation approach, configuration options, and integration with existing components.",
|
"expansionPrompt": "Break down the logging system implementation into subtasks covering log level configuration, output destination management, specialized logging (commands, APIs, errors), and performance metrics. Each subtask should detail the implementation approach, configuration options, and integration with existing components.",
|
||||||
"reasoning": "Creating a comprehensive logging system involves implementing multiple log levels, configurable destinations, and specialized logging for different components. The complexity is moderate, focusing on providing useful information for debugging and monitoring while maintaining performance."
|
"reasoning": "Creating a comprehensive logging system involves implementing multiple log levels, configurable destinations, and specialized logging for different components. The complexity is moderate, focusing on providing useful information for debugging and monitoring while maintaining performance."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 18,
|
"taskId": 18,
|
||||||
"taskTitle": "Create Comprehensive User Documentation",
|
"taskTitle": "Create Comprehensive User Documentation",
|
||||||
"complexityScore": 7,
|
"complexityScore": 7,
|
||||||
"recommendedSubtasks": 5,
|
"recommendedSubtasks": 5,
|
||||||
"expansionPrompt": "Divide the user documentation creation into subtasks covering README with installation instructions, command reference, configuration guide, example workflows, troubleshooting guides, and advanced usage. Each subtask should specify the content to include, format, and organization to ensure comprehensive coverage.",
|
"expansionPrompt": "Divide the user documentation creation into subtasks covering README with installation instructions, command reference, configuration guide, example workflows, troubleshooting guides, and advanced usage. Each subtask should specify the content to include, format, and organization to ensure comprehensive coverage.",
|
||||||
"reasoning": "Creating comprehensive documentation requires covering installation, usage, configuration, examples, and troubleshooting across multiple components. The complexity is moderate to high due to the breadth of functionality to document and the need to make it accessible to different user levels."
|
"reasoning": "Creating comprehensive documentation requires covering installation, usage, configuration, examples, and troubleshooting across multiple components. The complexity is moderate to high due to the breadth of functionality to document and the need to make it accessible to different user levels."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 19,
|
"taskId": 19,
|
||||||
"taskTitle": "Implement Error Handling and Recovery",
|
"taskTitle": "Implement Error Handling and Recovery",
|
||||||
"complexityScore": 8,
|
"complexityScore": 8,
|
||||||
"recommendedSubtasks": 5,
|
"recommendedSubtasks": 5,
|
||||||
"expansionPrompt": "Break down the error handling implementation into subtasks covering consistent error formatting, helpful error messages, API error handling with retries, file system error recovery, validation errors, and system state recovery. Each subtask should detail the specific error types to handle, recovery strategies, and user communication approach.",
|
"expansionPrompt": "Break down the error handling implementation into subtasks covering consistent error formatting, helpful error messages, API error handling with retries, file system error recovery, validation errors, and system state recovery. Each subtask should detail the specific error types to handle, recovery strategies, and user communication approach.",
|
||||||
"reasoning": "Implementing robust error handling across the entire system represents high complexity due to the variety of error types, the need for meaningful messages, and the implementation of recovery mechanisms. This task is critical for system reliability and user experience."
|
"reasoning": "Implementing robust error handling across the entire system represents high complexity due to the variety of error types, the need for meaningful messages, and the implementation of recovery mechanisms. This task is critical for system reliability and user experience."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 20,
|
"taskId": 20,
|
||||||
"taskTitle": "Create Token Usage Tracking and Cost Management",
|
"taskTitle": "Create Token Usage Tracking and Cost Management",
|
||||||
"complexityScore": 7,
|
"complexityScore": 7,
|
||||||
"recommendedSubtasks": 4,
|
"recommendedSubtasks": 4,
|
||||||
"expansionPrompt": "Divide the token tracking and cost management into subtasks covering usage tracking implementation, configurable limits, reporting features, cost estimation, caching for optimization, and usage alerts. Each subtask should specify the implementation approach, data storage, and user interface for monitoring and managing usage.",
|
"expansionPrompt": "Divide the token tracking and cost management into subtasks covering usage tracking implementation, configurable limits, reporting features, cost estimation, caching for optimization, and usage alerts. Each subtask should specify the implementation approach, data storage, and user interface for monitoring and managing usage.",
|
||||||
"reasoning": "Implementing token usage tracking involves monitoring API calls, calculating costs, implementing limits, and optimizing usage through caching. The complexity is moderate to high, focusing on providing users with visibility into their API consumption and tools to manage costs."
|
"reasoning": "Implementing token usage tracking involves monitoring API calls, calculating costs, implementing limits, and optimizing usage through caching. The complexity is moderate to high, focusing on providing users with visibility into their API consumption and tools to manage costs."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 21,
|
"taskId": 21,
|
||||||
"taskTitle": "Refactor dev.js into Modular Components",
|
"taskTitle": "Refactor dev.js into Modular Components",
|
||||||
"complexityScore": 8,
|
"complexityScore": 8,
|
||||||
"recommendedSubtasks": 5,
|
"recommendedSubtasks": 5,
|
||||||
"expansionPrompt": "Break down the refactoring of dev.js into subtasks covering module design (commands.js, ai-services.js, task-manager.js, ui.js, utils.js), entry point restructuring, dependency management, error handling standardization, and documentation. Each subtask should detail the specific code to extract, interfaces to define, and integration points between modules.",
|
"expansionPrompt": "Break down the refactoring of dev.js into subtasks covering module design (commands.js, ai-services.js, task-manager.js, ui.js, utils.js), entry point restructuring, dependency management, error handling standardization, and documentation. Each subtask should detail the specific code to extract, interfaces to define, and integration points between modules.",
|
||||||
"reasoning": "Refactoring a monolithic file into modular components represents high complexity due to the need to identify appropriate boundaries, manage dependencies between modules, and ensure all functionality is preserved. This requires deep understanding of the existing codebase and careful restructuring."
|
"reasoning": "Refactoring a monolithic file into modular components represents high complexity due to the need to identify appropriate boundaries, manage dependencies between modules, and ensure all functionality is preserved. This requires deep understanding of the existing codebase and careful restructuring."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 22,
|
"taskId": 22,
|
||||||
"taskTitle": "Create Comprehensive Test Suite for Task Master CLI",
|
"taskTitle": "Create Comprehensive Test Suite for Task Master CLI",
|
||||||
"complexityScore": 9,
|
"complexityScore": 9,
|
||||||
"recommendedSubtasks": 5,
|
"recommendedSubtasks": 5,
|
||||||
"expansionPrompt": "Divide the test suite creation into subtasks covering unit test implementation, integration test development, end-to-end test creation, mocking setup, and CI integration. Each subtask should specify the testing approach, coverage goals, test data preparation, and specific functionality to test.",
|
"expansionPrompt": "Divide the test suite creation into subtasks covering unit test implementation, integration test development, end-to-end test creation, mocking setup, and CI integration. Each subtask should specify the testing approach, coverage goals, test data preparation, and specific functionality to test.",
|
||||||
"reasoning": "Developing a comprehensive test suite represents high complexity due to the need to cover unit, integration, and end-to-end tests across all functionality, implement appropriate mocking, and ensure good test coverage. This requires significant test engineering and understanding of the entire system."
|
"reasoning": "Developing a comprehensive test suite represents high complexity due to the need to cover unit, integration, and end-to-end tests across all functionality, implement appropriate mocking, and ensure good test coverage. This requires significant test engineering and understanding of the entire system."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 23,
|
"taskId": 23,
|
||||||
"taskTitle": "Implement MCP (Model Context Protocol) Server Functionality for Task Master",
|
"taskTitle": "Implement MCP (Model Context Protocol) Server Functionality for Task Master",
|
||||||
"complexityScore": 9,
|
"complexityScore": 9,
|
||||||
"recommendedSubtasks": 5,
|
"recommendedSubtasks": 5,
|
||||||
"expansionPrompt": "Break down the MCP server implementation into subtasks covering core server module creation, endpoint implementation (/context, /models, /execute), context management system, authentication mechanisms, and performance optimization. Each subtask should detail the API design, data structures, and integration with existing Task Master functionality.",
|
"expansionPrompt": "Break down the MCP server implementation into subtasks covering core server module creation, endpoint implementation (/context, /models, /execute), context management system, authentication mechanisms, and performance optimization. Each subtask should detail the API design, data structures, and integration with existing Task Master functionality.",
|
||||||
"reasoning": "Implementing an MCP server represents high complexity due to the need to create a RESTful API with multiple endpoints, manage context data efficiently, handle authentication, and ensure compatibility with the MCP specification. This requires significant API design and server-side development work."
|
"reasoning": "Implementing an MCP server represents high complexity due to the need to create a RESTful API with multiple endpoints, manage context data efficiently, handle authentication, and ensure compatibility with the MCP specification. This requires significant API design and server-side development work."
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"taskId": 24,
|
"taskId": 24,
|
||||||
"taskTitle": "Implement AI-Powered Test Generation Command",
|
"taskTitle": "Implement AI-Powered Test Generation Command",
|
||||||
"complexityScore": 7,
|
"complexityScore": 7,
|
||||||
"recommendedSubtasks": 4,
|
"recommendedSubtasks": 4,
|
||||||
"expansionPrompt": "Divide the test generation command implementation into subtasks covering command structure and parameter handling, task analysis logic, AI prompt construction, and test file generation. Each subtask should specify the implementation approach, AI interaction pattern, and output formatting requirements.",
|
"expansionPrompt": "Divide the test generation command implementation into subtasks covering command structure and parameter handling, task analysis logic, AI prompt construction, and test file generation. Each subtask should specify the implementation approach, AI interaction pattern, and output formatting requirements.",
|
||||||
"reasoning": "Creating an AI-powered test generation command involves analyzing tasks, constructing effective prompts, and generating well-formatted test files. The complexity is moderate to high, focusing on leveraging AI to produce useful tests based on task descriptions and subtasks."
|
"reasoning": "Creating an AI-powered test generation command involves analyzing tasks, constructing effective prompts, and generating well-formatted test files. The complexity is moderate to high, focusing on leveraging AI to produce useful tests based on task descriptions and subtasks."
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|

@@ -2,7 +2,7 @@

/**
 * test-claude-errors.js
 *
 * A test script to verify the error handling and retry logic in the callClaude function.
 * This script creates a modified version of dev.js that simulates different error scenarios.
 */
@@ -22,7 +22,7 @@ dotenv.config();

// Create a simple PRD for testing
const createTestPRD = () => {
  return `# Test PRD for Error Handling

## Overview
This is a simple test PRD to verify the error handling in the callClaude function.
@@ -36,21 +36,22 @@ This is a simple test PRD to verify the error handling in the callClaude functio

// Create a modified version of dev.js that simulates errors
function createErrorSimulationScript(errorType, failureCount = 2) {
  // Read the original dev.js file
  const devJsPath = path.join(__dirname, 'dev.js');
  const devJsContent = fs.readFileSync(devJsPath, 'utf8');

  // Create a modified version that simulates errors
  let modifiedContent = devJsContent;

  // Find the anthropic.messages.create call and replace it with our mock
-  const anthropicCallRegex = /const response = await anthropic\.messages\.create\(/;
+  const anthropicCallRegex =
+    /const response = await anthropic\.messages\.create\(/;
  let mockCode = '';

  switch (errorType) {
    case 'network':
      mockCode = `
// Mock for network error simulation
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -65,10 +66,10 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}

const response = await anthropic.messages.create(`;
      break;

    case 'timeout':
      mockCode = `
// Mock for timeout error simulation
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -83,10 +84,10 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}

const response = await anthropic.messages.create(`;
      break;

    case 'invalid-json':
      mockCode = `
// Mock for invalid JSON response
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -107,10 +108,10 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}

const response = await anthropic.messages.create(`;
      break;

    case 'empty-tasks':
      mockCode = `
// Mock for empty tasks array
let currentAttempt = 0;
const failureCount = ${failureCount};
@@ -131,82 +132,87 @@ function createErrorSimulationScript(errorType, failureCount = 2) {
}

const response = await anthropic.messages.create(`;
      break;

    default:
      // No modification
      mockCode = `const response = await anthropic.messages.create(`;
  }

  // Replace the anthropic call with our mock
  modifiedContent = modifiedContent.replace(anthropicCallRegex, mockCode);

  // Write the modified script to a temporary file
  const tempScriptPath = path.join(__dirname, `temp-dev-${errorType}.js`);
  fs.writeFileSync(tempScriptPath, modifiedContent, 'utf8');

  return tempScriptPath;
}

// Function to run a test with a specific error type
async function runErrorTest(errorType, numTasks = 5, failureCount = 2) {
  console.log(`\n=== Test: ${errorType.toUpperCase()} Error Simulation ===`);

  // Create a test PRD
  const testPRD = createTestPRD();
  const testPRDPath = path.join(__dirname, `test-prd-${errorType}.txt`);
  fs.writeFileSync(testPRDPath, testPRD, 'utf8');

  // Create a modified dev.js that simulates the specified error
  const tempScriptPath = createErrorSimulationScript(errorType, failureCount);

  console.log(`Created test PRD at ${testPRDPath}`);
  console.log(`Created error simulation script at ${tempScriptPath}`);
-  console.log(`Running with error type: ${errorType}, failure count: ${failureCount}, tasks: ${numTasks}`);
+  console.log(
+    `Running with error type: ${errorType}, failure count: ${failureCount}, tasks: ${numTasks}`
+  );

  try {
    // Run the modified script
-    execSync(`node ${tempScriptPath} parse-prd --input=${testPRDPath} --tasks=${numTasks}`, {
-      stdio: 'inherit'
-    });
+    execSync(
+      `node ${tempScriptPath} parse-prd --input=${testPRDPath} --tasks=${numTasks}`,
+      {
+        stdio: 'inherit'
+      }
+    );
    console.log(`${errorType} error test completed successfully`);
  } catch (error) {
    console.error(`${errorType} error test failed:`, error.message);
  } finally {
    // Clean up temporary files
    if (fs.existsSync(tempScriptPath)) {
      fs.unlinkSync(tempScriptPath);
    }
    if (fs.existsSync(testPRDPath)) {
      fs.unlinkSync(testPRDPath);
    }
  }
}

// Function to run all error tests
async function runAllErrorTests() {
  console.log('Starting error handling tests for callClaude function...');

  // Test 1: Network error with automatic retry
  await runErrorTest('network', 5, 2);

  // Test 2: Timeout error with automatic retry
  await runErrorTest('timeout', 5, 2);

  // Test 3: Invalid JSON response with task reduction
  await runErrorTest('invalid-json', 10, 2);

  // Test 4: Empty tasks array with task reduction
  await runErrorTest('empty-tasks', 15, 2);

  // Test 5: Exhausted retries (more failures than MAX_RETRIES)
  await runErrorTest('network', 5, 4);

  console.log('\nAll error tests completed!');
}

// Run the tests
-runAllErrorTests().catch(error => {
+runAllErrorTests().catch((error) => {
  console.error('Error running tests:', error);
  process.exit(1);
});

@@ -2,7 +2,7 @@

/**
 * test-claude.js
 *
 * A simple test script to verify the improvements to the callClaude function.
 * This script tests different scenarios:
 * 1. Normal operation with a small PRD
@@ -24,11 +24,11 @@ dotenv.config();

// Create a simple PRD for testing
const createTestPRD = (size = 'small', taskComplexity = 'simple') => {
  let content = `# Test PRD - ${size.toUpperCase()} SIZE, ${taskComplexity.toUpperCase()} COMPLEXITY\n\n`;

  // Add more content based on size
  if (size === 'small') {
    content += `
## Overview
This is a small test PRD to verify the callClaude function improvements.

@@ -44,9 +44,9 @@ This is a small test PRD to verify the callClaude function improvements.
- Backend: Node.js
- Database: MongoDB
`;
  } else if (size === 'medium') {
    // Medium-sized PRD with more requirements
    content += `
## Overview
This is a medium-sized test PRD to verify the callClaude function improvements.

@@ -76,20 +76,20 @@ This is a medium-sized test PRD to verify the callClaude function improvements.
- CI/CD: GitHub Actions
- Monitoring: Prometheus and Grafana
`;
  } else if (size === 'large') {
    // Large PRD with many requirements
    content += `
## Overview
This is a large test PRD to verify the callClaude function improvements.

## Requirements
`;
    // Generate 30 requirements
    for (let i = 1; i <= 30; i++) {
      content += `${i}. Requirement ${i} - This is a detailed description of requirement ${i}.\n`;
    }

    content += `
## Technical Stack
- Frontend: React with TypeScript
- Backend: Node.js with Express
@@ -101,12 +101,12 @@ This is a large test PRD to verify the callClaude function improvements.

## User Stories
`;
    // Generate 20 user stories
    for (let i = 1; i <= 20; i++) {
      content += `- As a user, I want to be able to ${i} so that I can achieve benefit ${i}.\n`;
    }

    content += `
## Non-Functional Requirements
- Performance: The system should respond within 200ms
- Scalability: The system should handle 10,000 concurrent users
@@ -114,11 +114,11 @@ This is a large test PRD to verify the callClaude function improvements.
- Security: The system should comply with OWASP top 10
- Accessibility: The system should comply with WCAG 2.1 AA
`;
  }

  // Add complexity if needed
  if (taskComplexity === 'complex') {
    content += `
## Complex Requirements
- Implement a real-time collaboration system
- Add a machine learning-based recommendation engine
@@ -131,101 +131,110 @@ This is a large test PRD to verify the callClaude function improvements.
- Implement a custom reporting system
- Add a custom dashboard builder
`;
  }

  return content;
};

// Function to run the tests
async function runTests() {
  console.log('Starting tests for callClaude function improvements...');

  try {
    // Instead of importing the callClaude function directly, we'll use the dev.js script
    // with our test PRDs by running it as a child process

    // Test 1: Small PRD, 5 tasks
    console.log('\n=== Test 1: Small PRD, 5 tasks ===');
    const smallPRD = createTestPRD('small', 'simple');
    const smallPRDPath = path.join(__dirname, 'test-small-prd.txt');
    fs.writeFileSync(smallPRDPath, smallPRD, 'utf8');

    console.log(`Created test PRD at ${smallPRDPath}`);
    console.log('Running dev.js with small PRD...');

    // Use the child_process module to run the dev.js script
    const { execSync } = await import('child_process');

    try {
-      const smallResult = execSync(`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${smallPRDPath} --tasks=5`, {
-        stdio: 'inherit'
-      });
+      const smallResult = execSync(
+        `node ${path.join(__dirname, 'dev.js')} parse-prd --input=${smallPRDPath} --tasks=5`,
+        {
+          stdio: 'inherit'
+        }
+      );
      console.log('Small PRD test completed successfully');
    } catch (error) {
      console.error('Small PRD test failed:', error.message);
    }

    // Test 2: Medium PRD, 15 tasks
    console.log('\n=== Test 2: Medium PRD, 15 tasks ===');
    const mediumPRD = createTestPRD('medium', 'simple');
    const mediumPRDPath = path.join(__dirname, 'test-medium-prd.txt');
    fs.writeFileSync(mediumPRDPath, mediumPRD, 'utf8');

    console.log(`Created test PRD at ${mediumPRDPath}`);
    console.log('Running dev.js with medium PRD...');

    try {
-      const mediumResult = execSync(`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${mediumPRDPath} --tasks=15`, {
-        stdio: 'inherit'
-      });
+      const mediumResult = execSync(
+        `node ${path.join(__dirname, 'dev.js')} parse-prd --input=${mediumPRDPath} --tasks=15`,
+        {
+          stdio: 'inherit'
+        }
+      );
      console.log('Medium PRD test completed successfully');
    } catch (error) {
      console.error('Medium PRD test failed:', error.message);
    }

    // Test 3: Large PRD, 25 tasks
    console.log('\n=== Test 3: Large PRD, 25 tasks ===');
    const largePRD = createTestPRD('large', 'complex');
    const largePRDPath = path.join(__dirname, 'test-large-prd.txt');
    fs.writeFileSync(largePRDPath, largePRD, 'utf8');

    console.log(`Created test PRD at ${largePRDPath}`);
    console.log('Running dev.js with large PRD...');

    try {
-      const largeResult = execSync(`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${largePRDPath} --tasks=25`, {
-        stdio: 'inherit'
-      });
+      const largeResult = execSync(
+        `node ${path.join(__dirname, 'dev.js')} parse-prd --input=${largePRDPath} --tasks=25`,
+        {
+          stdio: 'inherit'
+        }
+      );
      console.log('Large PRD test completed successfully');
    } catch (error) {
      console.error('Large PRD test failed:', error.message);
    }

    console.log('\nAll tests completed!');
  } catch (error) {
    console.error('Test failed:', error);
  } finally {
    // Clean up test files
    console.log('\nCleaning up test files...');
    const testFiles = [
      path.join(__dirname, 'test-small-prd.txt'),
      path.join(__dirname, 'test-medium-prd.txt'),
      path.join(__dirname, 'test-large-prd.txt')
    ];

-    testFiles.forEach(file => {
+    testFiles.forEach((file) => {
      if (fs.existsSync(file)) {
        fs.unlinkSync(file);
        console.log(`Deleted ${file}`);
      }
    });

    console.log('Cleanup complete.');
  }
}

// Run the tests
-runTests().catch(error => {
+runTests().catch((error) => {
  console.error('Error running tests:', error);
  process.exit(1);
});
|||||||
@@ -1,4 +1,8 @@
import {
	checkForUpdate,
	displayUpgradeNotification,
	compareVersions
} from './scripts/modules/commands.js';
import fs from 'fs';
import path from 'path';

@@ -7,63 +11,73 @@ process.env.FORCE_VERSION = '0.9.30';

// Create a mock package.json in memory for testing
const mockPackageJson = {
	name: 'task-master-ai',
	version: '0.9.30'
};

// Modified version of checkForUpdate that doesn't use HTTP for testing
async function testCheckForUpdate(simulatedLatestVersion) {
	// Get current version - use our forced version
	const currentVersion = process.env.FORCE_VERSION || '0.9.30';

	console.log(`Using simulated current version: ${currentVersion}`);
	console.log(`Using simulated latest version: ${simulatedLatestVersion}`);

	// Compare versions
	const needsUpdate =
		compareVersions(currentVersion, simulatedLatestVersion) < 0;

	return {
		currentVersion,
		latestVersion: simulatedLatestVersion,
		needsUpdate
	};
}

// Test with current version older than latest (should show update notice)
async function runTest() {
	console.log('=== Testing version check scenarios ===\n');

	// Scenario 1: Update available
	console.log(
		'\n--- Scenario 1: Update available (Current: 0.9.30, Latest: 1.0.0) ---'
	);
	const updateInfo1 = await testCheckForUpdate('1.0.0');
	console.log('Update check results:');
	console.log(`- Current version: ${updateInfo1.currentVersion}`);
	console.log(`- Latest version: ${updateInfo1.latestVersion}`);
	console.log(`- Update needed: ${updateInfo1.needsUpdate}`);

	if (updateInfo1.needsUpdate) {
		console.log('\nDisplaying upgrade notification:');
		displayUpgradeNotification(
			updateInfo1.currentVersion,
			updateInfo1.latestVersion
		);
	}

	// Scenario 2: No update needed (versions equal)
	console.log(
		'\n--- Scenario 2: No update needed (Current: 0.9.30, Latest: 0.9.30) ---'
	);
	const updateInfo2 = await testCheckForUpdate('0.9.30');
	console.log('Update check results:');
	console.log(`- Current version: ${updateInfo2.currentVersion}`);
	console.log(`- Latest version: ${updateInfo2.latestVersion}`);
	console.log(`- Update needed: ${updateInfo2.needsUpdate}`);

	// Scenario 3: Development version (current newer than latest)
	console.log(
		'\n--- Scenario 3: Development version (Current: 0.9.30, Latest: 0.9.0) ---'
	);
	const updateInfo3 = await testCheckForUpdate('0.9.0');
	console.log('Update check results:');
	console.log(`- Current version: ${updateInfo3.currentVersion}`);
	console.log(`- Latest version: ${updateInfo3.latestVersion}`);
	console.log(`- Update needed: ${updateInfo3.needsUpdate}`);

	console.log('\n=== Test complete ===');
}

// Run all tests
runTest();

@@ -1,4 +1,7 @@
import {
	displayUpgradeNotification,
	compareVersions
} from './scripts/modules/commands.js';

// Simulate different version scenarios
console.log('=== Simulating version check ===\n');

@@ -8,15 +11,25 @@ console.log('Scenario 1: Current version older than latest');
displayUpgradeNotification('0.9.30', '1.0.0');

// 2. Current version same as latest (no update needed)
console.log(
	'\nScenario 2: Current version same as latest (this would not normally show a notice)'
);
console.log('Current: 1.0.0, Latest: 1.0.0');
console.log('compareVersions result:', compareVersions('1.0.0', '1.0.0'));
console.log(
	'Update needed:',
	compareVersions('1.0.0', '1.0.0') < 0 ? 'Yes' : 'No'
);

// 3. Current version newer than latest (e.g., development version, would not show notice)
console.log(
	'\nScenario 3: Current version newer than latest (this would not normally show a notice)'
);
console.log('Current: 1.1.0, Latest: 1.0.0');
console.log('compareVersions result:', compareVersions('1.1.0', '1.0.0'));
console.log(
	'Update needed:',
	compareVersions('1.1.0', '1.0.0') < 0 ? 'Yes' : 'No'
);

console.log('\n=== Test complete ===');

@@ -60,4 +60,4 @@ We aim for at least 80% test coverage for all code paths. Coverage reports can b

```bash
npm run test:coverage
```

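The 80% coverage target mentioned above is usually enforced in the test runner's configuration rather than by the npm script alone. Below is a minimal sketch assuming Jest is the runner; the file name `jest.config.js` and the exact option values are illustrative and not taken from this diff.

```js
// jest.config.js — illustrative sketch only; the repository's real config may differ
export default {
	// Collect coverage every time tests run with --coverage (e.g. `npm run test:coverage`)
	collectCoverage: true,
	coverageDirectory: 'coverage',
	coverageThreshold: {
		// Fail the run if any global metric drops below 80%
		global: {
			branches: 80,
			functions: 80,
			lines: 80,
			statements: 80
		}
	}
};
```

With a `coverageThreshold` like this, a coverage run exits non-zero as soon as any global metric falls below the configured floor, which keeps the stated 80% goal enforceable in CI.
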
@@ -1,14 +1,14 @@
{
	"tasks": [
		{
			"id": 1,
			"dependencies": [],
			"subtasks": [
				{
					"id": 1,
					"dependencies": []
				}
			]
		}
	]
}

86 tests/fixtures/sample-claude-response.js vendored
@@ -3,42 +3,50 @@
 */

export const sampleClaudeResponse = {
	tasks: [
		{
			id: 1,
			title: 'Setup Task Data Structure',
			description: 'Implement the core task data structure and file operations',
			status: 'pending',
			dependencies: [],
			priority: 'high',
			details:
				'Create the tasks.json file structure with support for task properties including ID, title, description, status, dependencies, priority, details, and test strategy. Implement file system operations for reading and writing task data.',
			testStrategy:
				'Verify tasks.json is created with the correct structure and that task data can be read from and written to the file.'
		},
		{
			id: 2,
			title: 'Implement CLI Foundation',
			description:
				'Create the command-line interface foundation with basic commands',
			status: 'pending',
			dependencies: [1],
			priority: 'high',
			details:
				'Set up Commander.js for handling CLI commands. Implement the basic command structure including help documentation. Create the foundational command parsing logic.',
			testStrategy:
				'Test each command to ensure it properly parses arguments and options. Verify help documentation is displayed correctly.'
		},
		{
			id: 3,
			title: 'Develop Task Management Operations',
			description:
				'Implement core operations for creating, reading, updating, and deleting tasks',
			status: 'pending',
			dependencies: [1],
			priority: 'medium',
			details:
				'Implement functions for listing tasks, adding new tasks, updating task status, and removing tasks. Include support for filtering tasks by status and other properties.',
			testStrategy:
				'Create unit tests for each CRUD operation to verify they correctly modify the task data.'
		}
	],
	metadata: {
		projectName: 'Task Management CLI',
		totalTasks: 3,
		sourceFile: 'tests/fixtures/sample-prd.txt',
		generatedAt: '2023-12-15'
	}
};

162 tests/fixtures/sample-tasks.js vendored
@@ -3,86 +3,88 @@
 */

export const sampleTasks = {
	meta: {
		projectName: 'Test Project',
		projectVersion: '1.0.0',
		createdAt: '2023-01-01T00:00:00.000Z',
		updatedAt: '2023-01-01T00:00:00.000Z'
	},
	tasks: [
		{
			id: 1,
			title: 'Initialize Project',
			description: 'Set up the project structure and dependencies',
			status: 'done',
			dependencies: [],
			priority: 'high',
			details:
				'Create directory structure, initialize package.json, and install dependencies',
			testStrategy: 'Verify all directories and files are created correctly'
		},
		{
			id: 2,
			title: 'Create Core Functionality',
			description: 'Implement the main features of the application',
			status: 'in-progress',
			dependencies: [1],
			priority: 'high',
			details:
				'Implement user authentication, data processing, and API endpoints',
			testStrategy: 'Write unit tests for all core functions',
			subtasks: [
				{
					id: 1,
					title: 'Implement Authentication',
					description: 'Create user authentication system',
					status: 'done',
					dependencies: []
				},
				{
					id: 2,
					title: 'Set Up Database',
					description: 'Configure database connection and models',
					status: 'pending',
					dependencies: [1]
				}
			]
		},
		{
			id: 3,
			title: 'Implement UI Components',
			description: 'Create the user interface components',
			status: 'pending',
			dependencies: [2],
			priority: 'medium',
			details: 'Design and implement React components for the user interface',
			testStrategy: 'Test components with React Testing Library',
			subtasks: [
				{
					id: 1,
					title: 'Create Header Component',
					description: 'Implement the header component',
					status: 'pending',
					dependencies: [],
					details: 'Create a responsive header with navigation links'
				},
				{
					id: 2,
					title: 'Create Footer Component',
					description: 'Implement the footer component',
					status: 'pending',
					dependencies: [],
					details: 'Create a footer with copyright information and links'
				}
			]
		}
	]
};

export const emptySampleTasks = {
	meta: {
		projectName: 'Empty Project',
		projectVersion: '1.0.0',
		createdAt: '2023-01-01T00:00:00.000Z',
		updatedAt: '2023-01-01T00:00:00.000Z'
	},
	tasks: []
};

Some files were not shown because too many files have changed in this diff.