Compare commits
51 Commits
v0.13.1 ... add-comple

| SHA1 |
| --- |
| 834dfb86ac |
| b984af0606 |
| a8dabf4485 |
| da317f2607 |
| ed17cb0e0a |
| e96734a6cc |
| 17294ff259 |
| a96215a359 |
| 0a611843b5 |
| a1f8d52474 |
| c47deeb869 |
| dd90c9cb5d |
| c7042845d6 |
| efce37469b |
| 4117f71c18 |
| d7ebfe30fc |
| 126abb9631 |
| e917fd16c0 |
| 07a710d88e |
| 0ca41443de |
| 09d839fff5 |
| 90068348d3 |
| 02e347d2d7 |
| 0527c363e3 |
| 735135efe9 |
| 4fee667a05 |
| 01963af2cb |
| 7c543cd8c3 |
| c2865b81f8 |
| a0ac50ffd7 |
| b6b0dd1e29 |
| 0f37cf0851 |
| abb5063b3e |
| 6b0ec458e8 |
| 8047ec756c |
| be8fe8092f |
| 33d2569ace |
| fdbb25e185 |
| deaf4a6ff4 |
| 3628acab78 |
| 463de0035c |
| fffcc5a89d |
| 11506ddc0e |
| dcb3f2f9f9 |
| e045a5268c |
| 8911bf4d49 |
| 5e5e20391a |
| 521cf0e5f0 |
| 92bd0e4395 |
| 3162ac49ec |
| b98af1541e |
.changeset/beige-doodles-type.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---

Resolve all issues related to MCP

.changeset/floppy-plants-marry.md (new file, 9 lines)
@@ -0,0 +1,9 @@
---
'task-master-ai': patch
---

Fix CLI --force flag for parse-prd command

Previously, the --force flag was not respected when running `parse-prd`, causing the command to prompt for confirmation or fail even when --force was provided. This patch ensures that the flag is correctly passed and handled, allowing users to overwrite existing tasks.json files as intended.

- Fixes #477

.changeset/forty-plums-stay.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
'task-master-ai': minor
---

.taskmasterconfig now supports a baseUrl field per model role (main, research, fallback), allowing endpoint overrides for any provider.
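For illustration, the role entries in `.taskmasterconfig` can carry the new field like this (a sketch; the values mirror the configuration documentation further down in this compare, and the surrounding file structure is trimmed):

```json
{
  "main": {
    "provider": "anthropic",
    "modelId": "claude-3-7-sonnet-20250219",
    "maxTokens": 64000,
    "temperature": 0.2,
    "baseUrl": "https://api.anthropic.com/v1"
  },
  "research": {
    "provider": "perplexity",
    "modelId": "sonar-pro",
    "maxTokens": 8700,
    "temperature": 0.1,
    "baseUrl": "https://api.perplexity.ai/v1"
  }
}
```

Omitting `baseUrl` keeps the provider's standard endpoint.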
.changeset/many-wasps-sell.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---

Task Master no longer tells you to update when you're already up to date

.changeset/pre.json (new file, 12 lines)
@@ -0,0 +1,12 @@
{
  "mode": "exit",
  "tag": "rc",
  "initialVersions": {
    "task-master-ai": "0.13.2"
  },
  "changesets": [
    "beige-doodles-type",
    "red-oranges-attend",
    "red-suns-wash"
  ]
}
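`.changeset/pre.json` is the state file Changesets keeps for pre-release mode; `"mode": "exit"` records that RC mode has been left. A sketch of the standard Changesets commands that drive this file (the same ones the pre-release workflow below runs; the example version number is illustrative):

```bash
npx changeset pre enter rc   # start RC mode; pre.json records "mode": "pre" and "tag": "rc"
npx changeset version        # bump pending changesets into an RC version, e.g. 0.13.2-rc.0
npx changeset publish        # publish the RC to npm under the rc dist-tag
npx changeset pre exit       # leave RC mode; pre.json records "mode": "exit"
```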
.changeset/red-oranges-attend.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---

Fix ERR_MODULE_NOT_FOUND when trying to run MCP Server

.changeset/red-suns-wash.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---

Add src directory to exports

.changeset/sharp-dingos-melt.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---

Fix the error handling of task status settings
.changeset/six-cloths-happen.md (new file, 7 lines)
@@ -0,0 +1,7 @@
---
'task-master-ai': patch
---

Remove caching layer from MCP direct functions for task listing, next task, and complexity report

- Fixes issues where users were getting stale data
.changeset/slow-singers-swim.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---

Fix for issue #409 LOG_LEVEL Pydantic validation error

.changeset/social-masks-fold.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
'task-master-ai': minor
---

Display task complexity scores in task lists, next task, and task details views.

.changeset/soft-zoos-flow.md (new file, 7 lines)
@@ -0,0 +1,7 @@
---
'task-master-ai': patch
---

Fix initial .env.example to work out of the box

- Closes #419

.changeset/ten-ways-mate.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---

Fix default fallback model and maxTokens in Taskmaster initialization

.changeset/tricky-wombats-spend.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---

Fix bug when updating tasks on the MCP server (#412)

.changeset/wide-eyes-relax.md (new file, 11 lines)
@@ -0,0 +1,11 @@
---
'task-master-ai': patch
---

Fix duplicate output on CLI help screen

- Prevent the Task Master CLI from printing the help screen more than once when using `-h` or `--help`.
- Removed redundant manual event handlers and guards for help output; now only the Commander `.helpInformation` override is used for custom help.
- Simplified logic so that help is only shown once for both "no arguments" and help flag flows.
- Ensures a clean, branded help experience with no repeated content.
- Fixes #339
@@ -116,7 +116,7 @@ Taskmaster configuration is managed through two main mechanisms:
* For MCP/Cursor integration, configure these keys in the `env` section of `.cursor/mcp.json`.
* Available keys/variables: See `assets/env.example` or the Configuration section in the command reference (previously linked to `taskmaster.mdc`).

**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool.
**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `TASKMASTER_LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool.
**If AI commands FAIL in MCP** verify that the API key for the selected provider is present in the `env` section of `.cursor/mcp.json`.
**If AI commands FAIL in CLI** verify that the API key for the selected provider is present in the `.env` file in the root of the project.
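As a sketch of what that `env` section looks like in practice (the `mcpServers` entry name here is hypothetical; the key names and placeholder formats come from `assets/env.example` further down):

```json
{
  "mcpServers": {
    "task-master-ai": {
      "env": {
        "ANTHROPIC_API_KEY": "sk-ant-api03-...",
        "PERPLEXITY_API_KEY": "pplx-..."
      }
    }
  }
}
```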
.github/workflows/pre-release.yml (new file, 62 lines, vendored)
@@ -0,0 +1,62 @@
name: Pre-Release (RC)

on:
  workflow_dispatch: # Allows manual triggering from GitHub UI/API

concurrency: pre-release-${{ github.ref }}

jobs:
  rc:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm'

      - name: Cache node_modules
        uses: actions/cache@v4
        with:
          path: |
            node_modules
            */*/node_modules
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-

      - name: Install dependencies
        run: npm ci
        timeout-minutes: 2

      - name: Enter RC mode
        run: |
          npx changeset pre exit || true
          npx changeset pre enter rc

      - name: Version RC packages
        run: npx changeset version
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          NPM_TOKEN: ${{ secrets.NPM_TOKEN }}

      - name: Create Release Candidate Pull Request or Publish Release Candidate to npm
        uses: changesets/action@v1
        with:
          publish: npm run release
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          NPM_TOKEN: ${{ secrets.NPM_TOKEN }}

      - name: Exit RC mode
        run: npx changeset pre exit

      - name: Commit & Push changes
        uses: actions-js/push@master
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          branch: ${{ github.ref }}
          message: 'chore: rc version bump'
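Because the only trigger is `workflow_dispatch`, an RC build has to be started by hand, either from the Actions tab or from the command line. For example, with the GitHub CLI (invocation sketched, not taken from the repository docs; the target ref is a placeholder):

```bash
gh workflow run pre-release.yml --ref <rc-branch>
gh run watch
```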
.github/workflows/release.yml (vendored, 3 lines changed)
@@ -33,6 +33,9 @@ jobs:
        run: npm ci
        timeout-minutes: 2

      - name: Exit pre-release mode (safety check)
        run: npx changeset pre exit || true

      - name: Create Release Pull Request or Publish to npm
        uses: changesets/action@v1
        with:
.gitignore (vendored, 3 lines changed)
@@ -61,3 +61,6 @@ dist
*.debug
init-debug.log
dev-debug.log

# NPMRC
.npmrc
@@ -14,8 +14,8 @@
  },
  "fallback": {
    "provider": "anthropic",
    "modelId": "claude-3.5-sonnet-20240620",
    "maxTokens": 120000,
    "modelId": "claude-3-5-sonnet-20240620",
    "maxTokens": 8192,
    "temperature": 0.1
  }
},
@@ -198,7 +198,7 @@ alwaysApply: true
- **MAX_TOKENS** (Default: `"4000"`): Maximum tokens for responses (Example: `MAX_TOKENS=8000`)
- **TEMPERATURE** (Default: `"0.7"`): Temperature for model responses (Example: `TEMPERATURE=0.5`)
- **DEBUG** (Default: `"false"`): Enable debug logging (Example: `DEBUG=true`)
- **LOG_LEVEL** (Default: `"info"`): Console output level (Example: `LOG_LEVEL=debug`)
- **TASKMASTER_LOG_LEVEL** (Default: `"info"`): Console output level (Example: `TASKMASTER_LOG_LEVEL=debug`)
- **DEFAULT_SUBTASKS** (Default: `"3"`): Default subtask count (Example: `DEFAULT_SUBTASKS=5`)
- **DEFAULT_PRIORITY** (Default: `"medium"`): Default priority (Example: `DEFAULT_PRIORITY=high`)
- **PROJECT_NAME** (Default: `"MCP SaaS MVP"`): Project name in metadata (Example: `PROJECT_NAME=My Awesome Project`)
@@ -1,8 +1,8 @@
# API Keys (Required to enable respective provider)
ANTHROPIC_API_KEY=your_anthropic_api_key_here # Required: Format: sk-ant-api03-...
PERPLEXITY_API_KEY=your_perplexity_api_key_here # Optional: Format: pplx-...
OPENAI_API_KEY=your_openai_api_key_here # Optional, for OpenAI/OpenRouter models. Format: sk-proj-...
GOOGLE_API_KEY=your_google_api_key_here # Optional, for Google Gemini models.
MISTRAL_API_KEY=your_mistral_key_here # Optional, for Mistral AI models.
XAI_API_KEY=YOUR_XAI_KEY_HERE # Optional, for xAI AI models.
AZURE_OPENAI_API_KEY=your_azure_key_here # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
ANTHROPIC_API_KEY="your_anthropic_api_key_here" # Required: Format: sk-ant-api03-...
PERPLEXITY_API_KEY="your_perplexity_api_key_here" # Optional: Format: pplx-...
OPENAI_API_KEY="your_openai_api_key_here" # Optional, for OpenAI/OpenRouter models. Format: sk-proj-...
GOOGLE_API_KEY="your_google_api_key_here" # Optional, for Google Gemini models.
MISTRAL_API_KEY="your_mistral_key_here" # Optional, for Mistral AI models.
XAI_API_KEY="YOUR_XAI_KEY_HERE" # Optional, for xAI AI models.
AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
@@ -31,7 +31,7 @@ Task Master configuration is now managed through two primary methods:
- Create a `.env` file in your project root for CLI usage.
- See `assets/env.example` for required key names.

**Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead.
**Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `TASKMASTER_LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead.

## How It Works
@@ -42,7 +42,7 @@ Task Master configuration is now managed through two primary methods:
- Tasks can have `subtasks` for more detailed implementation steps.
- Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.

2. **CLI Commands**
2. **CLI Commands**
   You can run the commands via:

   ```bash
@@ -200,7 +200,7 @@ Notes:

## Logging

The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:
The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable:

- `debug`: Detailed information, typically useful for troubleshooting
- `info`: Confirmation that things are working as expected (default)
@@ -15,13 +15,15 @@ Taskmaster uses two primary methods for configuration:
      "provider": "anthropic",
      "modelId": "claude-3-7-sonnet-20250219",
      "maxTokens": 64000,
      "temperature": 0.2
      "temperature": 0.2,
      "baseUrl": "https://api.anthropic.com/v1"
    },
    "research": {
      "provider": "perplexity",
      "modelId": "sonar-pro",
      "maxTokens": 8700,
      "temperature": 0.1
      "temperature": 0.1,
      "baseUrl": "https://api.perplexity.ai/v1"
    },
    "fallback": {
      "provider": "anthropic",
@@ -56,8 +58,9 @@ Taskmaster uses two primary methods for configuration:
  - `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`).
  - `OPENROUTER_API_KEY`: Your OpenRouter API key.
  - `XAI_API_KEY`: Your X-AI API key.
- **Optional Endpoint Overrides (in .taskmasterconfig):**
  - `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key.
- **Optional Endpoint Overrides:**
  - **Per-role `baseUrl` in `.taskmasterconfig`:** You can add a `baseUrl` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
  - `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseUrl` for the Azure model role).
  - `OLLAMA_BASE_URL`: Override the default Ollama API URL (Default: `http://localhost:11434/api`).

**Important:** Settings like model ID selections (`main`, `research`, `fallback`), `maxTokens`, `temperature`, `logLevel`, `defaultSubtasks`, `defaultPriority`, and `projectName` are **managed in `.taskmasterconfig`**, not environment variables.
@@ -8,7 +8,6 @@ import {
  enableSilentMode,
  disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';

/**
 * Direct function wrapper for displaying the complexity report with error handling and caching.
@@ -86,30 +85,20 @@ export async function complexityReportDirect(args, log) {

  // Use the caching utility
  try {
    const result = await getCachedOrExecute({
      cacheKey,
      actionFn: coreActionFn,
      log
    });
    log.info(
      `complexityReportDirect completed. From cache: ${result.fromCache}`
    );
    return result; // Returns { success, data/error, fromCache }
    const result = await coreActionFn();
    log.info('complexityReportDirect completed');
    return result;
  } catch (error) {
    // Catch unexpected errors from getCachedOrExecute itself
    // Ensure silent mode is disabled
    disableSilentMode();

    log.error(
      `Unexpected error during getCachedOrExecute for complexityReport: ${error.message}`
    );
    log.error(`Unexpected error during complexityReport: ${error.message}`);
    return {
      success: false,
      error: {
        code: 'UNEXPECTED_ERROR',
        message: error.message
      },
      fromCache: false
    }
  };
  }
} catch (error) {
@@ -4,7 +4,6 @@
 */

import { listTasks } from '../../../../scripts/modules/task-manager.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import {
  enableSilentMode,
  disableSilentMode
@@ -19,7 +18,7 @@ import {
 */
export async function listTasksDirect(args, log) {
  // Destructure the explicit tasksJsonPath from args
  const { tasksJsonPath, status, withSubtasks } = args;
  const { tasksJsonPath, reportPath, status, withSubtasks } = args;

  if (!tasksJsonPath) {
    log.error('listTasksDirect called without tasksJsonPath');
@@ -36,7 +35,6 @@ export async function listTasksDirect(args, log) {
  // Use the explicit tasksJsonPath for cache key
  const statusFilter = status || 'all';
  const withSubtasksFilter = withSubtasks || false;
  const cacheKey = `listTasks:${tasksJsonPath}:${statusFilter}:${withSubtasksFilter}`;

  // Define the action function to be executed on cache miss
  const coreListTasksAction = async () => {
@@ -51,6 +49,7 @@ export async function listTasksDirect(args, log) {
      const resultData = listTasks(
        tasksJsonPath,
        statusFilter,
        reportPath,
        withSubtasksFilter,
        'json'
      );
@@ -65,6 +64,7 @@ export async function listTasksDirect(args, log) {
        }
      };
    }

    log.info(
      `Core listTasks function retrieved ${resultData.tasks.length} tasks`
    );
@@ -88,25 +88,19 @@ export async function listTasksDirect(args, log) {
    }
  };

  // Use the caching utility
  try {
    const result = await getCachedOrExecute({
      cacheKey,
      actionFn: coreListTasksAction,
      log
    });
    log.info(`listTasksDirect completed. From cache: ${result.fromCache}`);
    return result; // Returns { success, data/error, fromCache }
    const result = await coreListTasksAction();
    log.info('listTasksDirect completed');
    return result;
  } catch (error) {
    // Catch unexpected errors from getCachedOrExecute itself (though unlikely)
    log.error(
      `Unexpected error during getCachedOrExecute for listTasks: ${error.message}`
    );
    log.error(`Unexpected error during listTasks: ${error.message}`);
    console.error(error.stack);
    return {
      success: false,
      error: { code: 'CACHE_UTIL_ERROR', message: error.message },
      fromCache: false
      error: {
        code: 'UNEXPECTED_ERROR',
        message: error.message
      }
    };
  }
}
@@ -4,8 +4,10 @@
 */

import { findNextTask } from '../../../../scripts/modules/task-manager.js';
import { readJSON } from '../../../../scripts/modules/utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import {
  readJSON,
  readComplexityReport
} from '../../../../scripts/modules/utils.js';
import {
  enableSilentMode,
  disableSilentMode
@@ -21,7 +23,7 @@ import {
 */
export async function nextTaskDirect(args, log) {
  // Destructure expected args
  const { tasksJsonPath } = args;
  const { tasksJsonPath, reportPath } = args;

  if (!tasksJsonPath) {
    log.error('nextTaskDirect called without tasksJsonPath');
@@ -35,9 +37,6 @@ export async function nextTaskDirect(args, log) {
    };
  }

  // Generate cache key using the provided task path
  const cacheKey = `nextTask:${tasksJsonPath}`;

  // Define the action function to be executed on cache miss
  const coreNextTaskAction = async () => {
    try {
@@ -59,8 +58,11 @@ export async function nextTaskDirect(args, log) {
        };
      }

      // Read the complexity report
      const complexityReport = readComplexityReport(reportPath);

      // Find the next task
      const nextTask = findNextTask(data.tasks);
      const nextTask = findNextTask(data.tasks, complexityReport);

      if (!nextTask) {
        log.info(
@@ -118,18 +120,11 @@ export async function nextTaskDirect(args, log) {

  // Use the caching utility
  try {
    const result = await getCachedOrExecute({
      cacheKey,
      actionFn: coreNextTaskAction,
      log
    });
    log.info(`nextTaskDirect completed. From cache: ${result.fromCache}`);
    return result; // Returns { success, data/error, fromCache }
    const result = await coreNextTaskAction();
    log.info(`nextTaskDirect completed.`);
    return result;
  } catch (error) {
    // Catch unexpected errors from getCachedOrExecute itself
    log.error(
      `Unexpected error during getCachedOrExecute for nextTask: ${error.message}`
    );
    log.error(`Unexpected error during nextTask: ${error.message}`);
    return {
      success: false,
      error: {
@@ -3,11 +3,10 @@
 * Direct function implementation for showing task details
 */

import { findTaskById, readJSON } from '../../../../scripts/modules/utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import {
  enableSilentMode,
  disableSilentMode
  findTaskById,
  readComplexityReport,
  readJSON
} from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js';

@@ -17,6 +16,7 @@ import { findTasksJsonPath } from '../utils/path-utils.js';
 * @param {Object} args - Command arguments.
 * @param {string} args.id - Task ID to show.
 * @param {string} [args.file] - Optional path to the tasks file (passed to findTasksJsonPath).
 * @param {string} args.reportPath - Explicit path to the complexity report file.
 * @param {string} [args.status] - Optional status to filter subtasks by.
 * @param {string} args.projectRoot - Absolute path to the project root directory (already normalized by tool).
 * @param {Object} log - Logger object.
@@ -27,7 +27,7 @@ export async function showTaskDirect(args, log) {
  // Destructure session from context if needed later, otherwise ignore
  // const { session } = context;
  // Destructure projectRoot and other args. projectRoot is assumed normalized.
  const { id, file, status, projectRoot } = args;
  const { id, file, reportPath, status, projectRoot } = args;

  log.info(
    `Showing task direct function. ID: ${id}, File: ${file}, Status Filter: ${status}, ProjectRoot: ${projectRoot}`
@@ -64,9 +64,12 @@ export async function showTaskDirect(args, log) {
    };
  }

  const complexityReport = readComplexityReport(reportPath);

  const { task, originalSubtaskCount } = findTaskById(
    tasksData.tasks,
    id,
    complexityReport,
    status
  );
@@ -6,6 +6,10 @@
import path from 'path';
import { updateTasks } from '../../../../scripts/modules/task-manager.js';
import { createLogWrapper } from '../../tools/utils.js';
import {
  enableSilentMode,
  disableSilentMode
} from '../../../../scripts/modules/utils.js';

/**
 * Direct function wrapper for updating tasks based on new context.
@@ -339,6 +339,49 @@ export function findPRDDocumentPath(projectRoot, explicitPath, log) {
  return null;
}

export function findComplexityReportPath(projectRoot, explicitPath, log) {
  // If explicit path is provided, check if it exists
  if (explicitPath) {
    const fullPath = path.isAbsolute(explicitPath)
      ? explicitPath
      : path.resolve(projectRoot, explicitPath);

    if (fs.existsSync(fullPath)) {
      log.info(`Using provided PRD document path: ${fullPath}`);
      return fullPath;
    } else {
      log.warn(
        `Provided PRD document path not found: ${fullPath}, will search for alternatives`
      );
    }
  }

  // Common locations and file patterns for PRD documents
  const commonLocations = [
    '', // Project root
    'scripts/'
  ];

  const commonFileNames = [
    'complexity-report.json',
    'task-complexity-report.json'
  ];

  // Check all possible combinations
  for (const location of commonLocations) {
    for (const fileName of commonFileNames) {
      const potentialPath = path.join(projectRoot, location, fileName);
      if (fs.existsSync(potentialPath)) {
        log.info(`Found PRD document at: ${potentialPath}`);
        return potentialPath;
      }
    }
  }

  log.warn(`No PRD document found in common locations within ${projectRoot}`);
  return null;
}

/**
 * Resolves the tasks output directory path
 * @param {string} projectRoot - The project root directory
@@ -10,7 +10,10 @@ import {
  withNormalizedProjectRoot
} from './utils.js';
import { showTaskDirect } from '../core/task-master-core.js';
import { findTasksJsonPath } from '../core/utils/path-utils.js';
import {
  findTasksJsonPath,
  findComplexityReportPath
} from '../core/utils/path-utils.js';

/**
 * Custom processor function that removes allTasks from the response
@@ -50,6 +53,12 @@ export function registerShowTaskTool(server) {
        .string()
        .optional()
        .describe('Path to the tasks file relative to project root'),
      complexityReport: z
        .string()
        .optional()
        .describe(
          'Path to the complexity report file (relative to project root or absolute)'
        ),
      projectRoot: z
        .string()
        .optional()
@@ -81,9 +90,22 @@ export function registerShowTaskTool(server) {
      }

      // Call the direct function, passing the normalized projectRoot
      // Resolve the path to complexity report
      let complexityReportPath;
      try {
        complexityReportPath = findComplexityReportPath(
          projectRoot,
          args.complexityReport,
          log
        );
      } catch (error) {
        log.error(`Error finding complexity report: ${error.message}`);
      }
      const result = await showTaskDirect(
        {
          tasksJsonPath: tasksJsonPath,
          reportPath: complexityReportPath,
          // Pass other relevant args
          id: id,
          status: status,
          projectRoot: projectRoot
@@ -10,7 +10,10 @@ import {
  withNormalizedProjectRoot
} from './utils.js';
import { listTasksDirect } from '../core/task-master-core.js';
import { findTasksJsonPath } from '../core/utils/path-utils.js';
import {
  findTasksJsonPath,
  findComplexityReportPath
} from '../core/utils/path-utils.js';

/**
 * Register the getTasks tool with the MCP server
@@ -38,6 +41,12 @@ export function registerListTasksTool(server) {
        .describe(
          'Path to the tasks file (relative to project root or absolute)'
        ),
      complexityReport: z
        .string()
        .optional()
        .describe(
          'Path to the complexity report file (relative to project root or absolute)'
        ),
      projectRoot: z
        .string()
        .describe('The directory of the project. Must be an absolute path.')
@@ -60,11 +69,23 @@ export function registerListTasksTool(server) {
        );
      }

      // Resolve the path to complexity report
      let complexityReportPath;
      try {
        complexityReportPath = findComplexityReportPath(
          args.projectRoot,
          args.complexityReport,
          log
        );
      } catch (error) {
        log.error(`Error finding complexity report: ${error.message}`);
      }
      const result = await listTasksDirect(
        {
          tasksJsonPath: tasksJsonPath,
          status: args.status,
          withSubtasks: args.withSubtasks
          withSubtasks: args.withSubtasks,
          reportPath: complexityReportPath
        },
        log
      );
@@ -10,7 +10,10 @@ import {
  withNormalizedProjectRoot
} from './utils.js';
import { nextTaskDirect } from '../core/task-master-core.js';
import { findTasksJsonPath } from '../core/utils/path-utils.js';
import {
  findTasksJsonPath,
  findComplexityReportPath
} from '../core/utils/path-utils.js';

/**
 * Register the next-task tool with the MCP server
@@ -23,6 +26,12 @@ export function registerNextTaskTool(server) {
      'Find the next task to work on based on dependencies and status',
    parameters: z.object({
      file: z.string().optional().describe('Absolute path to the tasks file'),
      complexityReport: z
        .string()
        .optional()
        .describe(
          'Path to the complexity report file (relative to project root or absolute)'
        ),
      projectRoot: z
        .string()
        .describe('The directory of the project. Must be an absolute path.')
@@ -45,9 +54,21 @@ export function registerNextTaskTool(server) {
        );
      }

      // Resolve the path to complexity report
      let complexityReportPath;
      try {
        complexityReportPath = findComplexityReportPath(
          args.projectRoot,
          args.complexityReport,
          log
        );
      } catch (error) {
        log.error(`Error finding complexity report: ${error.message}`);
      }
      const result = await nextTaskDirect(
        {
          tasksJsonPath: tasksJsonPath
          tasksJsonPath: tasksJsonPath,
          reportPath: complexityReportPath
        },
        log
      );
@@ -11,6 +11,7 @@ import {
} from './utils.js';
import { setTaskStatusDirect } from '../core/task-master-core.js';
import { findTasksJsonPath } from '../core/utils/path-utils.js';
import { TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js';

/**
 * Register the setTaskStatus tool with the MCP server
@@ -27,7 +28,7 @@ export function registerSetTaskStatusTool(server) {
          "Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated to update multiple tasks/subtasks at once."
        ),
      status: z
        .string()
        .enum(TASK_STATUS_OPTIONS)
        .describe(
          "New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'."
        ),
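`src/constants/task-status.js` itself is not part of this compare. Based on how it is consumed above and in the CLI and task-manager diffs below (a `TASK_STATUS_OPTIONS` array fed to `z.enum` and joined into help text, plus an `isValidTaskStatus` predicate), it presumably looks roughly like this sketch:

```js
// Hypothetical reconstruction of src/constants/task-status.js (the real file is not shown in this diff).
export const TASK_STATUS_OPTIONS = [
  'pending',
  'done',
  'in-progress',
  'review',
  'deferred',
  'cancelled'
];

// Returns true when the given status is one of the allowed values.
export function isValidTaskStatus(status) {
  return TASK_STATUS_OPTIONS.includes(status);
}
```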
package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
{
  "name": "task-master-ai",
  "version": "0.13.0",
  "version": "0.13.2",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "task-master-ai",
      "version": "0.13.0",
      "version": "0.13.2",
      "license": "MIT WITH Commons-Clause",
      "dependencies": {
        "@ai-sdk/anthropic": "^1.2.10",
@@ -1,6 +1,6 @@
{
  "name": "task-master-ai",
  "version": "0.13.1",
  "version": "0.13.2",
  "description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
  "main": "index.js",
  "type": "module",
@@ -82,15 +82,14 @@
    "url": "https://github.com/eyaltoledano/claude-task-master/issues"
  },
  "files": [
    "scripts/init.js",
    "scripts/dev.js",
    "scripts/modules/**",
    "scripts/**",
    "assets/**",
    ".cursor/**",
    "README-task-master.md",
    "index.js",
    "bin/**",
    "mcp-server/**"
    "mcp-server/**",
    "src/**"
  ],
  "overrides": {
    "node-fetch": "^3.3.2",
@@ -32,7 +32,7 @@ The script can be configured through environment variables in a `.env` file at t
- `PERPLEXITY_API_KEY`: Your Perplexity API key for research-backed subtask generation
- `PERPLEXITY_MODEL`: Specify which Perplexity model to use (default: "sonar-medium-online")
- `DEBUG`: Enable debug logging (default: false)
- `LOG_LEVEL`: Log level - debug, info, warn, error (default: info)
- `TASKMASTER_LOG_LEVEL`: Log level - debug, info, warn, error (default: info)
- `DEFAULT_SUBTASKS`: Default number of subtasks when expanding (default: 3)
- `DEFAULT_PRIORITY`: Default priority for generated tasks (default: medium)
- `PROJECT_NAME`: Override default project name in tasks.json
@@ -47,7 +47,7 @@ The script can be configured through environment variables in a `.env` file at t
- Tasks can have `subtasks` for more detailed implementation steps.
- Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.

2. **Script Commands**
2. **Script Commands**
   You can run the script via:

   ```bash
@@ -225,7 +225,7 @@ To use the Perplexity integration:

## Logging

The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:
The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable:

- `debug`: Detailed information, typically useful for troubleshooting
- `info`: Confirmation that things are working as expected (default)
@@ -38,10 +38,10 @@ const LOG_LEVELS = {
  success: 4
};

// Get log level from environment or default to info
const LOG_LEVEL = process.env.LOG_LEVEL
  ? LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()]
  : LOG_LEVELS.info;
// Determine log level from environment variable or default to 'info'
const LOG_LEVEL = process.env.TASKMASTER_LOG_LEVEL
  ? LOG_LEVELS[process.env.TASKMASTER_LOG_LEVEL.toLowerCase()]
  : LOG_LEVELS.info; // Default to info

// Create a color gradient for the banner
const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);
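With the rename, a one-off verbose run of the CLI looks like this (usage sketch; the `--with-subtasks` flag is the one the `list` command registers elsewhere in this diff):

```bash
TASKMASTER_LOG_LEVEL=debug task-master list --with-subtasks
```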
@@ -14,7 +14,8 @@ import {
  getResearchModelId,
  getFallbackProvider,
  getFallbackModelId,
  getParametersForRole
  getParametersForRole,
  getBaseUrlForRole
} from './config-manager.js';
import { log, resolveEnvVariable, findProjectRoot } from './utils.js';
@@ -284,7 +285,13 @@ async function _unifiedServiceRunner(serviceType, params) {
    'AI service call failed for all configured roles.';

  for (const currentRole of sequence) {
    let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn;
    let providerName,
      modelId,
      apiKey,
      roleParams,
      providerFnSet,
      providerApiFn,
      baseUrl;

    try {
      log('info', `New AI service call with role: ${currentRole}`);
@@ -325,6 +332,7 @@ async function _unifiedServiceRunner(serviceType, params) {

      // Pass effectiveProjectRoot to getParametersForRole
      roleParams = getParametersForRole(currentRole, effectiveProjectRoot);
      baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot);

      // 2. Get Provider Function Set
      providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()];
@@ -401,6 +409,7 @@ async function _unifiedServiceRunner(serviceType, params) {
        maxTokens: roleParams.maxTokens,
        temperature: roleParams.temperature,
        messages,
        baseUrl,
        ...(serviceType === 'generateObject' && { schema, objectName }),
        ...restApiParams
      };
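On the provider side, `baseUrl` ends up in the per-call parameter object. The provider modules themselves are not part of this diff; one plausible way a provider built on the AI SDK could honor the override (a sketch, assuming the Anthropic provider wraps `@ai-sdk/anthropic`, which accepts a `baseURL` setting, and that the core `ai` package is available) is:

```js
// Hypothetical sketch, not the project's actual provider module.
import { createAnthropic } from '@ai-sdk/anthropic';
import { generateText } from 'ai';

async function generateAnthropicText({ apiKey, modelId, maxTokens, temperature, messages, baseUrl }) {
  // When baseUrl is undefined, the SDK falls back to the provider's default endpoint.
  const anthropic = createAnthropic({ apiKey, ...(baseUrl && { baseURL: baseUrl }) });

  const { text } = await generateText({
    model: anthropic(modelId),
    maxTokens,
    temperature,
    messages
  });
  return text;
}
```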
@@ -73,7 +73,11 @@ import {
  getApiKeyStatusReport
} from './task-manager/models.js';
import { findProjectRoot } from './utils.js';

import {
  isValidTaskStatus,
  TASK_STATUS_OPTIONS
} from '../../src/constants/task-status.js';
import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
/**
 * Runs the interactive setup process for model configuration.
 * @param {string|null} projectRoot - The resolved project root directory.
@@ -486,11 +490,6 @@ function registerCommands(programInstance) {
    process.exit(1);
  });

  // Default help
  programInstance.on('--help', function () {
    displayHelp();
  });
  // parse-prd command
  programInstance
    .command('parse-prd')
@@ -515,7 +514,7 @@ function registerCommands(programInstance) {
      const outputPath = options.output;
      const force = options.force || false;
      const append = options.append || false;
      let useForce = false;
      let useForce = force;
      let useAppend = false;

      // Helper function to check if tasks.json exists and confirm overwrite
@@ -609,7 +608,7 @@ function registerCommands(programInstance) {
        spinner = ora('Parsing PRD and generating tasks...').start();
        await parsePRD(inputFile, outputPath, numTasks, {
          append: useAppend,
          force: useForce
          useForce
        });
        spinner.succeed('Tasks generated successfully!');
      } catch (error) {
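In practice, the `--force` fix (changeset floppy-plants-marry, #477) means a forced re-parse no longer stops at the overwrite prompt. An invocation would look roughly like this (the PRD path is a placeholder; the exact input-file syntax follows the `parse-prd` options, which are not all shown in this hunk):

```bash
task-master parse-prd scripts/prd.txt --force
```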
@@ -1038,7 +1037,7 @@ function registerCommands(programInstance) {
    )
    .option(
      '-s, --status <status>',
      'New status (todo, in-progress, review, done)'
      `New status (one of: ${TASK_STATUS_OPTIONS.join(', ')})`
    )
    .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
    .action(async (options) => {
@@ -1051,6 +1050,16 @@ function registerCommands(programInstance) {
        process.exit(1);
      }

      if (!isValidTaskStatus(status)) {
        console.error(
          chalk.red(
            `Error: Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
          )
        );

        process.exit(1);
      }

      console.log(
        chalk.blue(`Setting status of task(s) ${taskId} to: ${status}`)
      );
@@ -1063,10 +1072,16 @@ function registerCommands(programInstance) {
    .command('list')
    .description('List all tasks')
    .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
    .option(
      '-r, --report <report>',
      'Path to the complexity report file',
      'scripts/task-complexity-report.json'
    )
    .option('-s, --status <status>', 'Filter by status')
    .option('--with-subtasks', 'Show subtasks for each task')
    .action(async (options) => {
      const tasksPath = options.file;
      const reportPath = options.report;
      const statusFilter = options.status;
      const withSubtasks = options.withSubtasks || false;

@@ -1078,7 +1093,7 @@ function registerCommands(programInstance) {
        console.log(chalk.blue('Including subtasks in listing'));
      }

      await listTasks(tasksPath, statusFilter, withSubtasks);
      await listTasks(tasksPath, statusFilter, reportPath, withSubtasks);
    });
  // expand command
@@ -1278,10 +1293,6 @@ function registerCommands(programInstance) {
      '--details <details>',
      'Implementation details (for manual task creation)'
    )
    .option(
      '--test-strategy <testStrategy>',
      'Test strategy (for manual task creation)'
    )
    .option(
      '--dependencies <dependencies>',
      'Comma-separated list of task IDs this task depends on'
@@ -1388,9 +1399,15 @@ function registerCommands(programInstance) {
      `Show the next task to work on based on dependencies and status${chalk.reset('')}`
    )
    .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
    .option(
      '-r, --report <report>',
      'Path to the complexity report file',
      'scripts/task-complexity-report.json'
    )
    .action(async (options) => {
      const tasksPath = options.file;
      await displayNextTask(tasksPath);
      const reportPath = options.report;
      await displayNextTask(tasksPath, reportPath);
    });
  // show command
@@ -1403,6 +1420,11 @@ function registerCommands(programInstance) {
    .option('-i, --id <id>', 'Task ID to show')
    .option('-s, --status <status>', 'Filter subtasks by status') // ADDED status option
    .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
    .option(
      '-r, --report <report>',
      'Path to the complexity report file',
      'scripts/task-complexity-report.json'
    )
    .action(async (taskId, options) => {
      const idArg = taskId || options.id;
      const statusFilter = options.status; // ADDED: Capture status filter
@@ -1413,8 +1435,9 @@ function registerCommands(programInstance) {
      }

      const tasksPath = options.file;
      const reportPath = options.report;
      // PASS statusFilter to the display function
      await displayTaskById(tasksPath, idArg, statusFilter);
      await displayTaskById(tasksPath, idArg, reportPath, statusFilter);
    });

  // add-dependency command
@@ -1663,6 +1686,7 @@ function registerCommands(programInstance) {
      }
    } catch (error) {
      console.error(chalk.red(`Error: ${error.message}`));
      showAddSubtaskHelp();
      process.exit(1);
    }
  })
@@ -2366,14 +2390,7 @@ function setupCLI() {
      return 'unknown'; // Default fallback if package.json fails
    })
    .helpOption('-h, --help', 'Display help')
    .addHelpCommand(false) // Disable default help command
    .on('--help', () => {
      displayHelp(); // Use your custom help display instead
    })
    .on('-h', () => {
      displayHelp();
      process.exit(0);
    });
    .addHelpCommand(false); // Disable default help command

  // Modify the help option to use your custom display
  programInstance.helpInformation = () => {
@@ -2393,28 +2410,7 @@
 */
async function checkForUpdate() {
  // Get current version from package.json ONLY
  let currentVersion = 'unknown'; // Initialize with a default
  try {
    // Try to get the version from the installed package (if applicable) or current dir
    let packageJsonPath = path.join(
      process.cwd(),
      'node_modules',
      'task-master-ai',
      'package.json'
    );
    // Fallback to current directory package.json if not found in node_modules
    if (!fs.existsSync(packageJsonPath)) {
      packageJsonPath = path.join(process.cwd(), 'package.json');
    }

    if (fs.existsSync(packageJsonPath)) {
      const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
      currentVersion = packageJson.version;
    }
  } catch (error) {
    // Silently fail and use default
    log('debug', `Error reading current package version: ${error.message}`);
  }
  const currentVersion = getTaskMasterVersion();

  return new Promise((resolve) => {
    // Get the latest version from npm registry
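`src/utils/getVersion.js` is referenced here and in `ui.js` below but is not included in this compare. Given that it replaces the inline `package.json` probing above, a plausible shape is the following sketch (the relative path and fallback value are assumptions):

```js
// Hypothetical sketch of src/utils/getVersion.js (the real module is not shown in this diff).
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';

export function getTaskMasterVersion() {
  try {
    // Resolve package.json relative to this module rather than process.cwd(),
    // so the reported version is correct regardless of where the CLI is invoked from.
    const pkgPath = path.resolve(
      path.dirname(fileURLToPath(import.meta.url)),
      '../../package.json'
    );
    return JSON.parse(fs.readFileSync(pkgPath, 'utf8')).version;
  } catch {
    return 'unknown';
  }
}
```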
@@ -677,6 +677,13 @@ function getAllProviders() {
  return Object.keys(MODEL_MAP || {});
}

function getBaseUrlForRole(role, explicitRoot = null) {
  const roleConfig = getModelConfigForRole(role, explicitRoot);
  return roleConfig && typeof roleConfig.baseUrl === 'string'
    ? roleConfig.baseUrl
    : undefined;
}

export {
  // Core config access
  getConfig,
@@ -704,6 +711,7 @@ export {
  getFallbackModelId,
  getFallbackMaxTokens,
  getFallbackTemperature,
  getBaseUrlForRole,

  // Global setting getters (No env var overrides)
  getLogLevel,
@@ -23,7 +23,7 @@ import updateSubtaskById from './task-manager/update-subtask-by-id.js';
import removeTask from './task-manager/remove-task.js';
import taskExists from './task-manager/task-exists.js';
import isTaskDependentOn from './task-manager/is-task-dependent.js';

import { readComplexityReport } from './utils.js';
// Export task manager functions
export {
  parsePRD,
@@ -45,5 +45,6 @@ export {
  removeTask,
  findTaskById,
  taskExists,
  isTaskDependentOn
  isTaskDependentOn,
  readComplexityReport
};
@@ -1,3 +1,6 @@
import { log } from '../utils.js';
import { addComplexityToTask } from '../utils.js';

/**
 * Return the next work item:
 * • Prefer an eligible SUBTASK that belongs to any parent task
@@ -15,9 +18,10 @@
 * ─ parentId → number (present only when it's a subtask)
 *
 * @param {Object[]} tasks – full array of top-level tasks, each may contain .subtasks[]
 * @param {Object} [complexityReport=null] - Optional complexity report object
 * @returns {Object|null} – next work item or null if nothing is eligible
 */
function findNextTask(tasks) {
function findNextTask(tasks, complexityReport = null) {
  // ---------- helpers ----------------------------------------------------
  const priorityValues = { high: 3, medium: 2, low: 1 };

@@ -91,7 +95,14 @@ function findNextTask(tasks) {
      if (aPar !== bPar) return aPar - bPar;
      return aSub - bSub;
    });
    return candidateSubtasks[0];
    const nextTask = candidateSubtasks[0];

    // Add complexity to the task before returning
    if (nextTask && complexityReport) {
      addComplexityToTask(nextTask, complexityReport);
    }

    return nextTask;
  }

  // ---------- 2) fall back to top-level tasks (original logic) ------------
@@ -116,6 +127,11 @@ function findNextTask(tasks) {
      return a.id - b.id;
    })[0];

  // Add complexity to the task before returning
  if (nextTask && complexityReport) {
    addComplexityToTask(nextTask, complexityReport);
  }

  return nextTask;
}
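Both helpers used here, `readComplexityReport` and `addComplexityToTask`, live in `scripts/modules/utils.js`, which this compare does not show. From the call sites (a report object with a `complexityAnalysis` array, matched against tasks to populate `task.complexityScore`), minimal sketches would look like the following; the field names inside each analysis entry (`taskId`, `complexityScore`) are assumptions:

```js
// Hypothetical sketches of the utils.js helpers (the real implementations are not in this diff).
import fs from 'fs';

function readComplexityReport(reportPath) {
  try {
    if (!reportPath || !fs.existsSync(reportPath)) return null;
    return JSON.parse(fs.readFileSync(reportPath, 'utf8'));
  } catch {
    // A missing or malformed report simply disables complexity display.
    return null;
  }
}

function addComplexityToTask(task, complexityReport) {
  if (!task || !complexityReport?.complexityAnalysis) return;
  const entry = complexityReport.complexityAnalysis.find(
    (analysis) => analysis.taskId === task.id
  );
  if (entry) {
    task.complexityScore = entry.complexityScore;
  }
}
```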
@@ -2,13 +2,20 @@ import chalk from 'chalk';
import boxen from 'boxen';
import Table from 'cli-table3';

import { log, readJSON, truncate } from '../utils.js';
import {
  log,
  readJSON,
  truncate,
  readComplexityReport,
  addComplexityToTask
} from '../utils.js';
import findNextTask from './find-next-task.js';

import {
  displayBanner,
  getStatusWithColor,
  formatDependenciesWithStatus,
  getComplexityWithColor,
  createProgressBar
} from '../ui.js';

@@ -16,6 +23,7 @@ import {
 * List all tasks
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} statusFilter - Filter by status
 * @param {string} reportPath - Path to the complexity report
 * @param {boolean} withSubtasks - Whether to show subtasks
 * @param {string} outputFormat - Output format (text or json)
 * @returns {Object} - Task list result for json format
@@ -23,6 +31,7 @@
function listTasks(
  tasksPath,
  statusFilter,
  reportPath = null,
  withSubtasks = false,
  outputFormat = 'text'
) {
@@ -37,6 +46,13 @@ function listTasks(
      throw new Error(`No valid tasks found in ${tasksPath}`);
    }

    // Add complexity scores to tasks if report exists
    const complexityReport = readComplexityReport(reportPath);
    // Apply complexity scores to tasks
    if (complexityReport && complexityReport.complexityAnalysis) {
      data.tasks.forEach((task) => addComplexityToTask(task, complexityReport));
    }

    // Filter tasks by status if specified
    const filteredTasks =
      statusFilter && statusFilter.toLowerCase() !== 'all' // <-- Added check for 'all'
@@ -257,8 +273,8 @@ function listTasks(
    );
    const avgDependenciesPerTask = totalDependencies / data.tasks.length;

    // Find next task to work on
    const nextItem = findNextTask(data.tasks);
    // Find next task to work on, passing the complexity report
    const nextItem = findNextTask(data.tasks, complexityReport);

    // Get terminal width - more reliable method
    let terminalWidth;
@@ -301,8 +317,11 @@ function listTasks(
      `${chalk.blue('•')} ${chalk.white('Avg dependencies per task:')} ${avgDependenciesPerTask.toFixed(1)}\n\n` +
      chalk.cyan.bold('Next Task to Work On:') +
      '\n' +
      `ID: ${chalk.cyan(nextItem ? nextItem.id : 'N/A')} - ${nextItem ? chalk.white.bold(truncate(nextItem.title, 40)) : chalk.yellow('No task available')}\n` +
      `Priority: ${nextItem ? chalk.white(nextItem.priority || 'medium') : ''} Dependencies: ${nextItem ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true) : ''}`;
      `ID: ${chalk.cyan(nextItem ? nextItem.id : 'N/A')} - ${nextItem ? chalk.white.bold(truncate(nextItem.title, 40)) : chalk.yellow('No task available')}
` +
      `Priority: ${nextItem ? chalk.white(nextItem.priority || 'medium') : ''} Dependencies: ${nextItem ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : ''}
` +
      `Complexity: ${nextItem && nextItem.complexityScore ? getComplexityWithColor(nextItem.complexityScore) : chalk.gray('N/A')}`;

    // Calculate width for side-by-side display
    // Box borders, padding take approximately 4 chars on each side
@@ -412,9 +431,16 @@ function listTasks(
    // Make dependencies column smaller as requested (-20%)
    const depsWidthPct = 20;

    const complexityWidthPct = 10;

    // Calculate title/description width as remaining space (+20% from dependencies reduction)
    const titleWidthPct =
      100 - idWidthPct - statusWidthPct - priorityWidthPct - depsWidthPct;
      100 -
      idWidthPct -
      statusWidthPct -
      priorityWidthPct -
      depsWidthPct -
      complexityWidthPct;

    // Allow 10 characters for borders and padding
    const availableWidth = terminalWidth - 10;
@@ -424,6 +450,9 @@ function listTasks(
    const statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));
    const priorityWidth = Math.floor(availableWidth * (priorityWidthPct / 100));
    const depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));
    const complexityWidth = Math.floor(
      availableWidth * (complexityWidthPct / 100)
    );
    const titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));

    // Create a table with correct borders and spacing
@@ -433,9 +462,17 @@ function listTasks(
        chalk.cyan.bold('Title'),
        chalk.cyan.bold('Status'),
        chalk.cyan.bold('Priority'),
        chalk.cyan.bold('Dependencies')
        chalk.cyan.bold('Dependencies'),
        chalk.cyan.bold('Complexity')
      ],
      colWidths: [
        idWidth,
        titleWidth,
        statusWidth,
        priorityWidth,
        depsWidth,
        complexityWidth // Added complexity column width
      ],
      colWidths: [idWidth, titleWidth, statusWidth, priorityWidth, depsWidth],
      style: {
        head: [], // No special styling for header
        border: [], // No special styling for border
@@ -454,7 +491,8 @@ function listTasks(
        depText = formatDependenciesWithStatus(
          task.dependencies,
          data.tasks,
          true
          true,
          complexityReport
        );
      } else {
        depText = chalk.gray('None');
@@ -480,7 +518,10 @@ function listTasks(
        truncate(cleanTitle, titleWidth - 3),
        status,
        priorityColor(truncate(task.priority || 'medium', priorityWidth - 2)),
        depText // No truncation for dependencies
        depText,
        task.complexityScore
          ? getComplexityWithColor(task.complexityScore)
          : chalk.gray('N/A')
      ]);

      // Add subtasks if requested
@@ -516,6 +557,8 @@ function listTasks(
              // Default to regular task dependency
              const depTask = data.tasks.find((t) => t.id === depId);
              if (depTask) {
                // Add complexity to depTask before checking status
                addComplexityToTask(depTask, complexityReport);
                const isDone =
                  depTask.status === 'done' || depTask.status === 'completed';
                const isInProgress = depTask.status === 'in-progress';
@@ -541,7 +584,10 @@ function listTasks(
            chalk.dim(`└─ ${truncate(subtask.title, titleWidth - 5)}`),
            getStatusWithColor(subtask.status, true),
            chalk.dim('-'),
            subtaskDepText // No truncation for dependencies
            subtaskDepText,
            subtask.complexityScore
              ? chalk.gray(`${subtask.complexityScore}`)
              : chalk.gray('N/A')
          ]);
        });
      }
@@ -597,6 +643,8 @@ function listTasks(
      subtasksSection = `\n\n${chalk.white.bold('Subtasks:')}\n`;
      subtasksSection += parentTaskForSubtasks.subtasks
        .map((subtask) => {
          // Add complexity to subtask before display
          addComplexityToTask(subtask, complexityReport);
          // Using a more simplified format for subtask status display
          const status = subtask.status || 'pending';
          const statusColors = {
@@ -625,8 +673,8 @@ function listTasks(
      '\n\n' +
      // Use nextItem.priority, nextItem.status, nextItem.dependencies
      `${chalk.white('Priority:')} ${priorityColors[nextItem.priority || 'medium'](nextItem.priority || 'medium')} ${chalk.white('Status:')} ${getStatusWithColor(nextItem.status, true)}\n` +
      `${chalk.white('Dependencies:')} ${nextItem.dependencies && nextItem.dependencies.length > 0 ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true) : chalk.gray('None')}\n\n` +
      // Use nextItem.description (Note: findNextTask doesn't return description, need to fetch original task/subtask for this)
      `${chalk.white('Dependencies:')} ${nextItem.dependencies && nextItem.dependencies.length > 0 ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : chalk.gray('None')}\n\n` +
      // Use nextTask.description (Note: findNextTask doesn't return description, need to fetch original task/subtask for this)
      // *** Fetching original item for description and details ***
      `${chalk.white('Description:')} ${getWorkItemDescription(nextItem, data.tasks)}` +
      subtasksSection + // <-- Subtasks are handled above now
@@ -8,6 +8,10 @@ import { validateTaskDependencies } from '../dependency-manager.js';
import { getDebugFlag } from '../config-manager.js';
import updateSingleTaskStatus from './update-single-task-status.js';
import generateTaskFiles from './generate-task-files.js';
import {
  isValidTaskStatus,
  TASK_STATUS_OPTIONS
} from '../../../src/constants/task-status.js';

/**
 * Set the status of a task
@@ -19,6 +23,11 @@ import generateTaskFiles from './generate-task-files.js';
 */
async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {
  try {
    if (!isValidTaskStatus(newStatus)) {
      throw new Error(
        `Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
      );
    }
    // Determine if we're in MCP mode by checking for mcpLog
    const isMcpMode = !!options?.mcpLog;
@@ -1,6 +1,7 @@
import chalk from 'chalk';

import { log } from '../utils.js';
import { isValidTaskStatus } from '../../../src/constants/task-status.js';

/**
 * Update the status of a single task
@@ -17,6 +18,12 @@ async function updateSingleTaskStatus(
  data,
  showUi = true
) {
  if (!isValidTaskStatus(newStatus)) {
    throw new Error(
      `Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
    );
  }

  // Check if it's a subtask (e.g., "1.2")
  if (taskIdInput.includes('.')) {
    const [parentId, subtaskId] = taskIdInput
@@ -16,10 +16,15 @@ import {
|
||||
truncate,
|
||||
isSilentMode
|
||||
} from './utils.js';
|
||||
import path from 'path';
|
||||
import fs from 'fs';
|
||||
import { findNextTask, analyzeTaskComplexity } from './task-manager.js';
|
||||
import {
|
||||
findNextTask,
|
||||
analyzeTaskComplexity,
|
||||
readComplexityReport
|
||||
} from './task-manager.js';
|
||||
import { getProjectName, getDefaultSubtasks } from './config-manager.js';
|
||||
import { TASK_STATUS_OPTIONS } from '../../src/constants/task-status.js';
|
||||
import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
|
||||
|
||||
// Create a color gradient for the banner
|
||||
const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);
|
||||
@@ -46,17 +51,7 @@ function displayBanner() {
|
||||
);
|
||||
|
||||
// Read version directly from package.json
|
||||
let version = 'unknown'; // Initialize with a default
|
||||
try {
|
||||
const packageJsonPath = path.join(process.cwd(), 'package.json');
|
||||
if (fs.existsSync(packageJsonPath)) {
|
||||
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
|
||||
version = packageJson.version;
|
||||
}
|
||||
} catch (error) {
|
||||
// Silently fall back to default version
|
||||
log('warn', 'Could not read package.json for version info.');
|
||||
}
|
||||
const version = getTaskMasterVersion();
|
||||
|
||||
console.log(
|
||||
boxen(
|
||||
@@ -273,12 +268,14 @@ function getStatusWithColor(status, forTable = false) {
|
||||
* @param {Array} dependencies - Array of dependency IDs
|
||||
* @param {Array} allTasks - Array of all tasks
|
||||
* @param {boolean} forConsole - Whether the output is for console display
|
||||
* @param {Object|null} complexityReport - Optional pre-loaded complexity report
|
||||
* @returns {string} Formatted dependencies string
|
||||
*/
|
||||
function formatDependenciesWithStatus(
|
||||
dependencies,
|
||||
allTasks,
|
||||
forConsole = false
|
||||
forConsole = false,
|
||||
complexityReport = null // Add complexityReport parameter
|
||||
) {
|
||||
if (
|
||||
!dependencies ||
|
||||
@@ -342,7 +339,11 @@ function formatDependenciesWithStatus(
|
||||
typeof depId === 'string' ? parseInt(depId, 10) : depId;
|
||||
|
||||
// Look up the task using the numeric ID
|
||||
const depTaskResult = findTaskById(allTasks, numericDepId);
|
||||
const depTaskResult = findTaskById(
|
||||
allTasks,
|
||||
numericDepId,
|
||||
complexityReport
|
||||
);
|
||||
const depTask = depTaskResult.task; // Access the task object from the result
|
||||
|
||||
if (!depTask) {
|
||||
@@ -458,7 +459,7 @@ function displayHelp() {
|
||||
{
|
||||
name: 'set-status',
|
||||
args: '--id=<id> --status=<status>',
|
||||
desc: 'Update task status (done, pending, etc.)'
|
||||
desc: `Update task status (${TASK_STATUS_OPTIONS.join(', ')})`
|
||||
},
|
||||
{
|
||||
name: 'update',
|
||||
@@ -761,7 +762,7 @@ function truncateString(str, maxLength) {
|
||||
* Display the next task to work on
|
||||
* @param {string} tasksPath - Path to the tasks.json file
|
||||
*/
|
||||
async function displayNextTask(tasksPath) {
|
||||
async function displayNextTask(tasksPath, complexityReportPath = null) {
|
||||
displayBanner();
|
||||
|
||||
// Read the tasks file
|
||||
@@ -771,8 +772,11 @@ async function displayNextTask(tasksPath) {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Read complexity report once
|
||||
const complexityReport = readComplexityReport(complexityReportPath);
|
||||
|
||||
// Find the next task
|
||||
const nextTask = findNextTask(data.tasks);
|
||||
const nextTask = findNextTask(data.tasks, complexityReport);
|
||||
|
||||
if (!nextTask) {
|
||||
console.log(
|
||||
@@ -809,12 +813,7 @@ async function displayNextTask(tasksPath) {
|
||||
'padding-bottom': 0,
|
||||
compact: true
|
||||
},
|
||||
chars: {
|
||||
mid: '',
|
||||
'left-mid': '',
|
||||
'mid-mid': '',
|
||||
'right-mid': ''
|
||||
},
|
||||
chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },
|
||||
colWidths: [15, Math.min(75, process.stdout.columns - 20 || 60)],
|
||||
wordWrap: true
|
||||
});
|
||||
@@ -838,7 +837,18 @@ async function displayNextTask(tasksPath) {
|
||||
],
|
||||
[
|
||||
chalk.cyan.bold('Dependencies:'),
|
||||
formatDependenciesWithStatus(nextTask.dependencies, data.tasks, true)
|
||||
formatDependenciesWithStatus(
|
||||
nextTask.dependencies,
|
||||
data.tasks,
|
||||
true,
|
||||
complexityReport
|
||||
)
|
||||
],
|
||||
[
|
||||
chalk.cyan.bold('Complexity:'),
|
||||
nextTask.complexityScore
|
||||
? getComplexityWithColor(nextTask.complexityScore)
|
||||
: chalk.gray('N/A')
|
||||
],
|
||||
[chalk.cyan.bold('Description:'), nextTask.description]
|
||||
);
|
||||
@@ -902,12 +912,7 @@ async function displayNextTask(tasksPath) {
|
||||
'padding-bottom': 0,
|
||||
compact: true
|
||||
},
|
||||
chars: {
|
||||
mid: '',
|
||||
'left-mid': '',
|
||||
'mid-mid': '',
|
||||
'right-mid': ''
|
||||
},
|
||||
chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },
|
||||
wordWrap: true
|
||||
});
|
||||
|
||||
@@ -1011,7 +1016,12 @@ async function displayNextTask(tasksPath) {
|
||||
* @param {string|number} taskId - The ID of the task to display
|
||||
* @param {string} [statusFilter] - Optional status to filter subtasks by
|
||||
*/
|
||||
async function displayTaskById(tasksPath, taskId, statusFilter = null) {
|
||||
async function displayTaskById(
|
||||
tasksPath,
|
||||
taskId,
|
||||
complexityReportPath = null,
|
||||
statusFilter = null
|
||||
) {
|
||||
displayBanner();
|
||||
|
||||
// Read the tasks file
|
||||
@@ -1021,11 +1031,15 @@ async function displayTaskById(tasksPath, taskId, statusFilter = null) {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Read complexity report once
|
||||
const complexityReport = readComplexityReport(complexityReportPath);
|
||||
|
||||
// Find the task by ID, applying the status filter if provided
|
||||
// Returns { task, originalSubtaskCount, originalSubtasks }
|
||||
const { task, originalSubtaskCount, originalSubtasks } = findTaskById(
|
||||
data.tasks,
|
||||
taskId,
|
||||
complexityReport,
|
||||
statusFilter
|
||||
);
|
||||
|
||||
@@ -1080,6 +1094,12 @@ async function displayTaskById(tasksPath, taskId, statusFilter = null) {
|
||||
chalk.cyan.bold('Status:'),
|
||||
getStatusWithColor(task.status || 'pending', true)
|
||||
],
|
||||
[
|
||||
chalk.cyan.bold('Complexity:'),
|
||||
task.complexityScore
|
||||
? getComplexityWithColor(task.complexityScore)
|
||||
: chalk.gray('N/A')
|
||||
],
|
||||
[
|
||||
chalk.cyan.bold('Description:'),
|
||||
task.description || 'No description provided.'
|
||||
@@ -1158,7 +1178,18 @@ async function displayTaskById(tasksPath, taskId, statusFilter = null) {
|
||||
[chalk.cyan.bold('Priority:'), priorityColor(task.priority || 'medium')],
|
||||
[
|
||||
chalk.cyan.bold('Dependencies:'),
|
||||
formatDependenciesWithStatus(task.dependencies, data.tasks, true)
|
||||
formatDependenciesWithStatus(
|
||||
task.dependencies,
|
||||
data.tasks,
|
||||
true,
|
||||
complexityReport
|
||||
)
|
||||
],
|
||||
[
|
||||
chalk.cyan.bold('Complexity:'),
|
||||
task.complexityScore
|
||||
? getComplexityWithColor(task.complexityScore)
|
||||
: chalk.gray('N/A')
|
||||
],
|
||||
[chalk.cyan.bold('Description:'), task.description]
|
||||
);
|
||||
|
||||
@@ -275,6 +275,22 @@ function findTaskInComplexityReport(report, taskId) {
|
||||
return report.complexityAnalysis.find((task) => task.taskId === taskId);
|
||||
}
|
||||
|
||||
function addComplexityToTask(task, complexityReport) {
|
||||
let taskId;
|
||||
if (task.isSubtask) {
|
||||
taskId = task.parentTask.id;
|
||||
} else if (task.parentId) {
|
||||
taskId = task.parentId;
|
||||
} else {
|
||||
taskId = task.id;
|
||||
}
|
||||
|
||||
const taskAnalysis = findTaskInComplexityReport(complexityReport, taskId);
|
||||
if (taskAnalysis) {
|
||||
task.complexityScore = taskAnalysis.complexityScore;
|
||||
}
|
||||
}
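A rough illustration of the helper's in-place behavior (the data below is made up for the example; only the shapes match the complexity report used elsewhere in this diff): a task that appears in complexityAnalysis gains a complexityScore, and anything else is left untouched.

// Hypothetical data, for illustration only
const report = { complexityAnalysis: [{ taskId: 4, complexityScore: 7 }] };
const task = { id: 4, title: 'Build CLI' };

addComplexityToTask(task, report);
console.log(task.complexityScore); // 7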
|
||||
|
||||
/**
|
||||
* Checks if a task exists in the tasks array
|
||||
* @param {Array} tasks - The tasks array
|
||||
@@ -325,10 +341,17 @@ function formatTaskId(id) {
|
||||
* Finds a task by ID in the tasks array. Optionally filters subtasks by status.
|
||||
* @param {Array} tasks - The tasks array
|
||||
* @param {string|number} taskId - The task ID to find
|
||||
* @param {Object|null} complexityReport - Optional pre-loaded complexity report
|
||||
* @returns {Object|null} The task object or null if not found
|
||||
* @param {string} [statusFilter] - Optional status to filter subtasks by
|
||||
* @returns {{task: Object|null, originalSubtaskCount: number|null}} The task object (potentially with filtered subtasks) and the original subtask count if filtered, or nulls if not found.
|
||||
*/
|
||||
function findTaskById(tasks, taskId, statusFilter = null) {
|
||||
function findTaskById(
|
||||
tasks,
|
||||
taskId,
|
||||
complexityReport = null,
|
||||
statusFilter = null
|
||||
) {
|
||||
if (!taskId || !tasks || !Array.isArray(tasks)) {
|
||||
return { task: null, originalSubtaskCount: null };
|
||||
}
|
||||
@@ -356,10 +379,17 @@ function findTaskById(tasks, taskId, statusFilter = null) {
|
||||
subtask.isSubtask = true;
|
||||
}
|
||||
|
||||
// Return the found subtask (or null) and null for originalSubtaskCount
|
||||
// If we found a task, check for complexity data
|
||||
if (subtask && complexityReport) {
|
||||
addComplexityToTask(subtask, complexityReport);
|
||||
}
|
||||
|
||||
return { task: subtask || null, originalSubtaskCount: null };
|
||||
}
|
||||
|
||||
let taskResult = null;
|
||||
let originalSubtaskCount = null;
|
||||
|
||||
// Find the main task
|
||||
const id = parseInt(taskId, 10);
|
||||
const task = tasks.find((t) => t.id === id) || null;
|
||||
@@ -369,6 +399,8 @@ function findTaskById(tasks, taskId, statusFilter = null) {
|
||||
return { task: null, originalSubtaskCount: null };
|
||||
}
|
||||
|
||||
taskResult = task;
|
||||
|
||||
// If task found and statusFilter provided, filter its subtasks
|
||||
if (statusFilter && task.subtasks && Array.isArray(task.subtasks)) {
|
||||
const originalSubtaskCount = task.subtasks.length;
|
||||
@@ -379,12 +411,18 @@ function findTaskById(tasks, taskId, statusFilter = null) {
|
||||
subtask.status &&
|
||||
subtask.status.toLowerCase() === statusFilter.toLowerCase()
|
||||
);
|
||||
// Return the filtered task and the original count
|
||||
return { task: filteredTask, originalSubtaskCount: originalSubtaskCount };
|
||||
|
||||
taskResult = filteredTask;
|
||||
originalSubtaskCount = originalSubtaskCount;
|
||||
}
|
||||
|
||||
// Return original task and null count if no filter or no subtasks
|
||||
return { task: task, originalSubtaskCount: null };
|
||||
// If task found and complexityReport provided, add complexity data
|
||||
if (taskResult && complexityReport) {
|
||||
addComplexityToTask(taskResult, complexityReport);
|
||||
}
|
||||
|
||||
// Return the found task and original subtask count
|
||||
return { task: taskResult, originalSubtaskCount };
|
||||
}
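A short usage sketch of the widened findTaskById signature (import paths and the tasks.json location are assumptions based on the modules touched in this diff):

// Assumed imports; readComplexityReport comes from task-manager.js as in the ui.js change above
import { findTaskById, readJSON } from './utils.js';
import { readComplexityReport } from './task-manager.js';

const data = readJSON('tasks/tasks.json');
const complexityReport = readComplexityReport(); // may be null when no report has been generated
const { task, originalSubtaskCount } = findTaskById(
	data.tasks,
	3,
	complexityReport, // optional: attaches task.complexityScore when present
	'pending' // optional: filters the task's subtasks by status
);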
|
||||
|
||||
/**
|
||||
@@ -524,10 +562,11 @@ export {
|
||||
findCycles,
|
||||
toKebabCase,
|
||||
detectCamelCaseFlags,
|
||||
enableSilentMode,
|
||||
disableSilentMode,
|
||||
isSilentMode,
|
||||
resolveEnvVariable,
|
||||
enableSilentMode,
|
||||
getTaskManager,
|
||||
isSilentMode,
|
||||
addComplexityToTask,
|
||||
resolveEnvVariable,
|
||||
findProjectRoot
|
||||
};
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
* using the Vercel AI SDK.
|
||||
*/
|
||||
import { createAnthropic } from '@ai-sdk/anthropic';
|
||||
import { generateText, streamText, generateObject, streamObject } from 'ai';
|
||||
import { generateText, streamText, generateObject } from 'ai';
|
||||
import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
|
||||
|
||||
// TODO: Implement standardized functions for generateText, streamText, generateObject
|
||||
@@ -17,7 +17,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils is acces
|
||||
// Remove the global variable and caching logic
|
||||
// let anthropicClient;
|
||||
|
||||
function getClient(apiKey) {
|
||||
function getClient(apiKey, baseUrl) {
|
||||
if (!apiKey) {
|
||||
// In a real scenario, this would use the config resolver.
|
||||
// Throwing error here if key isn't passed for simplicity.
|
||||
@@ -30,14 +30,12 @@ function getClient(apiKey) {
|
||||
// Create and return a new instance directly with standard version header
|
||||
return createAnthropic({
|
||||
apiKey: apiKey,
|
||||
baseURL: 'https://api.anthropic.com/v1',
|
||||
...(baseUrl && { baseURL: baseUrl }),
|
||||
// Use standard version header instead of beta
|
||||
headers: {
|
||||
'anthropic-beta': 'output-128k-2025-02-19'
|
||||
}
|
||||
});
|
||||
// }
|
||||
// return anthropicClient;
|
||||
}
|
||||
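The Google, OpenAI, OpenRouter, Perplexity and xAI providers below receive the same optional-override treatment; the shared idea, condensed (createProvider is a stand-in name, not a real export):

function getClient(apiKey, baseUrl) {
	if (!apiKey) throw new Error('API key is required.');
	return createProvider({
		apiKey,
		// When no baseUrl is configured the spread adds nothing and the
		// provider falls back to its default endpoint.
		...(baseUrl && { baseURL: baseUrl })
	});
}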
|
||||
// --- Standardized Service Function Implementations ---
|
||||
@@ -51,6 +49,7 @@ function getClient(apiKey) {
|
||||
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
|
||||
* @returns {Promise<string>} The generated text content.
|
||||
* @throws {Error} If the API call fails.
|
||||
*/
|
||||
@@ -59,11 +58,12 @@ export async function generateAnthropicText({
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature
|
||||
temperature,
|
||||
baseUrl
|
||||
}) {
|
||||
log('debug', `Generating Anthropic text with model: ${modelId}`);
|
||||
try {
|
||||
const client = getClient(apiKey);
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const result = await generateText({
|
||||
model: client(modelId),
|
||||
messages: messages,
|
||||
@@ -93,6 +93,7 @@ export async function generateAnthropicText({
|
||||
* @param {Array<object>} params.messages - The messages array.
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
|
||||
* @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
|
||||
* @throws {Error} If the API call fails to initiate the stream.
|
||||
*/
|
||||
@@ -101,20 +102,20 @@ export async function streamAnthropicText({
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature
|
||||
temperature,
|
||||
baseUrl
|
||||
}) {
|
||||
log('debug', `Streaming Anthropic text with model: ${modelId}`);
|
||||
try {
|
||||
const client = getClient(apiKey);
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
|
||||
// --- DEBUG LOGGING --- >>
|
||||
log(
|
||||
'debug',
|
||||
'[streamAnthropicText] Parameters received by streamText:',
|
||||
JSON.stringify(
|
||||
{
|
||||
modelId: modelId, // Log modelId being used
|
||||
messages: messages, // Log the messages array
|
||||
modelId: modelId,
|
||||
messages: messages,
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature
|
||||
},
|
||||
@@ -122,25 +123,19 @@ export async function streamAnthropicText({
|
||||
2
|
||||
)
|
||||
);
|
||||
// --- << DEBUG LOGGING ---
|
||||
|
||||
const stream = await streamText({
|
||||
model: client(modelId),
|
||||
messages: messages,
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature
|
||||
// Beta header moved to client initialization
|
||||
// TODO: Add other relevant parameters
|
||||
});
|
||||
|
||||
// *** RETURN THE FULL STREAM OBJECT, NOT JUST stream.textStream ***
|
||||
return stream;
|
||||
} catch (error) {
|
||||
log(
|
||||
'error',
|
||||
`Anthropic streamText failed: ${error.message}`,
|
||||
error.stack // Log stack trace for more details
|
||||
);
|
||||
log('error', `Anthropic streamText failed: ${error.message}`, error.stack);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
@@ -160,6 +155,7 @@ export async function streamAnthropicText({
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {number} [params.maxRetries] - Max retries for validation/generation.
|
||||
* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
|
||||
* @returns {Promise<object>} The generated object matching the schema.
|
||||
* @throws {Error} If generation or validation fails.
|
||||
*/
|
||||
@@ -171,24 +167,22 @@ export async function generateAnthropicObject({
|
||||
objectName = 'generated_object',
|
||||
maxTokens,
|
||||
temperature,
|
||||
maxRetries = 3
|
||||
maxRetries = 3,
|
||||
baseUrl
|
||||
}) {
|
||||
log(
|
||||
'debug',
|
||||
`Generating Anthropic object ('${objectName}') with model: ${modelId}`
|
||||
);
|
||||
try {
|
||||
const client = getClient(apiKey);
|
||||
|
||||
// Log basic debug info
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
log(
|
||||
'debug',
|
||||
`Using maxTokens: ${maxTokens}, temperature: ${temperature}, model: ${modelId}`
|
||||
);
|
||||
|
||||
const result = await generateObject({
|
||||
model: client(modelId),
|
||||
mode: 'tool', // Anthropic generally uses 'tool' mode for structured output
|
||||
mode: 'tool',
|
||||
schema: schema,
|
||||
messages: messages,
|
||||
tool: {
|
||||
@@ -199,14 +193,12 @@ export async function generateAnthropicObject({
|
||||
temperature: temperature,
|
||||
maxRetries: maxRetries
|
||||
});
|
||||
|
||||
log(
|
||||
'debug',
|
||||
`Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
|
||||
);
|
||||
return result.object;
|
||||
} catch (error) {
|
||||
// Simple error logging
|
||||
log(
|
||||
'error',
|
||||
`Anthropic generateObject ('${objectName}') failed: ${error.message}`
|
||||
|
||||
@@ -12,6 +12,16 @@ import { log } from '../../scripts/modules/utils.js'; // Import logging utility
|
||||
const DEFAULT_MODEL = 'gemini-2.0-pro'; // Or a suitable default
|
||||
const DEFAULT_TEMPERATURE = 0.2; // Or a suitable default
|
||||
|
||||
function getClient(apiKey, baseUrl) {
|
||||
if (!apiKey) {
|
||||
throw new Error('Google API key is required.');
|
||||
}
|
||||
return createGoogleGenerativeAI({
|
||||
apiKey: apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl })
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates text using a Google AI model.
|
||||
*
|
||||
@@ -29,7 +39,8 @@ async function generateGoogleText({
|
||||
modelId = DEFAULT_MODEL,
|
||||
temperature = DEFAULT_TEMPERATURE,
|
||||
messages,
|
||||
maxTokens // Note: Vercel SDK might handle this differently, needs verification
|
||||
maxTokens,
|
||||
baseUrl
|
||||
}) {
|
||||
if (!apiKey) {
|
||||
throw new Error('Google API key is required.');
|
||||
@@ -37,28 +48,21 @@ async function generateGoogleText({
|
||||
log('info', `Generating text with Google model: ${modelId}`);
|
||||
|
||||
try {
|
||||
// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
|
||||
const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
|
||||
// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
|
||||
const model = googleProvider(modelId); // Correct model retrieval
|
||||
|
||||
// Construct payload suitable for Vercel SDK's generateText
|
||||
// Note: The exact structure might depend on how messages are passed
|
||||
const googleProvider = getClient(apiKey, baseUrl);
|
||||
const model = googleProvider(modelId);
|
||||
const result = await generateText({
|
||||
model, // Pass the model instance
|
||||
messages, // Pass the messages array directly
|
||||
model,
|
||||
messages,
|
||||
temperature,
|
||||
maxOutputTokens: maxTokens // Map to correct Vercel SDK param if available
|
||||
maxOutputTokens: maxTokens
|
||||
});
|
||||
|
||||
// Assuming result structure provides text directly or within a property
|
||||
return result.text; // Adjust based on actual SDK response
|
||||
return result.text;
|
||||
} catch (error) {
|
||||
log(
|
||||
'error',
|
||||
`Error generating text with Google (${modelId}): ${error.message}`
|
||||
);
|
||||
throw error; // Re-throw for unified service handler
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -79,7 +83,8 @@ async function streamGoogleText({
|
||||
modelId = DEFAULT_MODEL,
|
||||
temperature = DEFAULT_TEMPERATURE,
|
||||
messages,
|
||||
maxTokens
|
||||
maxTokens,
|
||||
baseUrl
|
||||
}) {
|
||||
if (!apiKey) {
|
||||
throw new Error('Google API key is required.');
|
||||
@@ -87,19 +92,15 @@ async function streamGoogleText({
|
||||
log('info', `Streaming text with Google model: ${modelId}`);
|
||||
|
||||
try {
|
||||
// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
|
||||
const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
|
||||
// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
|
||||
const model = googleProvider(modelId); // Correct model retrieval
|
||||
|
||||
const googleProvider = getClient(apiKey, baseUrl);
|
||||
const model = googleProvider(modelId);
|
||||
const stream = await streamText({
|
||||
model, // Pass the model instance
|
||||
model,
|
||||
messages,
|
||||
temperature,
|
||||
maxOutputTokens: maxTokens
|
||||
});
|
||||
|
||||
return stream; // Return the stream directly
|
||||
return stream;
|
||||
} catch (error) {
|
||||
log(
|
||||
'error',
|
||||
@@ -130,7 +131,8 @@ async function generateGoogleObject({
|
||||
messages,
|
||||
schema,
|
||||
objectName, // Note: Vercel SDK might use this differently or not at all
|
||||
maxTokens
|
||||
maxTokens,
|
||||
baseUrl
|
||||
}) {
|
||||
if (!apiKey) {
|
||||
throw new Error('Google API key is required.');
|
||||
@@ -138,23 +140,16 @@ async function generateGoogleObject({
|
||||
log('info', `Generating object with Google model: ${modelId}`);
|
||||
|
||||
try {
|
||||
// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
|
||||
const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
|
||||
// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
|
||||
const model = googleProvider(modelId); // Correct model retrieval
|
||||
|
||||
const googleProvider = getClient(apiKey, baseUrl);
|
||||
const model = googleProvider(modelId);
|
||||
const { object } = await generateObject({
|
||||
model, // Pass the model instance
|
||||
model,
|
||||
schema,
|
||||
messages,
|
||||
temperature,
|
||||
maxOutputTokens: maxTokens
|
||||
// Note: 'objectName' or 'mode' might not be directly applicable here
|
||||
// depending on how `@ai-sdk/google` handles `generateObject`.
|
||||
// Check SDK docs if specific tool calling/JSON mode needs explicit setup.
|
||||
});
|
||||
|
||||
return object; // Return the parsed object
|
||||
return object;
|
||||
} catch (error) {
|
||||
log(
|
||||
'error',
|
||||
|
||||
@@ -1,16 +1,26 @@
|
||||
import { createOpenAI, openai } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
|
||||
import { generateText, streamText, generateObject } from 'ai'; // Import necessary functions from 'ai'
|
||||
import { createOpenAI } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
|
||||
import { generateObject } from 'ai'; // Import necessary functions from 'ai'
|
||||
import { log } from '../../scripts/modules/utils.js';
|
||||
|
||||
function getClient(apiKey, baseUrl) {
|
||||
if (!apiKey) {
|
||||
throw new Error('OpenAI API key is required.');
|
||||
}
|
||||
return createOpenAI({
|
||||
apiKey: apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl })
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates text using OpenAI models via Vercel AI SDK.
|
||||
*
|
||||
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
|
||||
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
|
||||
* @returns {Promise<string>} The generated text content.
|
||||
* @throws {Error} If API call fails.
|
||||
*/
|
||||
export async function generateOpenAIText(params) {
|
||||
const { apiKey, modelId, messages, maxTokens, temperature } = params;
|
||||
const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
|
||||
log('debug', `generateOpenAIText called with model: ${modelId}`);
|
||||
|
||||
if (!apiKey) {
|
||||
@@ -23,18 +33,15 @@ export async function generateOpenAIText(params) {
|
||||
throw new Error('Invalid or empty messages array provided for OpenAI.');
|
||||
}
|
||||
|
||||
const openaiClient = createOpenAI({ apiKey });
|
||||
const openaiClient = getClient(apiKey, baseUrl);
|
||||
|
||||
try {
|
||||
const result = await openaiClient.chat(messages, {
|
||||
// Updated: Use openaiClient.chat directly
|
||||
model: modelId,
|
||||
max_tokens: maxTokens,
|
||||
temperature
|
||||
});
|
||||
|
||||
// Adjust based on actual Vercel SDK response structure for openaiClient.chat
|
||||
// This might need refinement based on testing the SDK's output.
|
||||
const textContent = result?.choices?.[0]?.message?.content?.trim();
|
||||
|
||||
if (!textContent) {
|
||||
@@ -65,12 +72,12 @@ export async function generateOpenAIText(params) {
|
||||
/**
|
||||
* Streams text using OpenAI models via Vercel AI SDK.
|
||||
*
|
||||
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
|
||||
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
|
||||
* @returns {Promise<ReadableStream>} A readable stream of text deltas.
|
||||
* @throws {Error} If API call fails.
|
||||
*/
|
||||
export async function streamOpenAIText(params) {
|
||||
const { apiKey, modelId, messages, maxTokens, temperature } = params;
|
||||
const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
|
||||
log('debug', `streamOpenAIText called with model: ${modelId}`);
|
||||
|
||||
if (!apiKey) {
|
||||
@@ -85,12 +92,10 @@ export async function streamOpenAIText(params) {
|
||||
);
|
||||
}
|
||||
|
||||
const openaiClient = createOpenAI({ apiKey });
|
||||
const openaiClient = getClient(apiKey, baseUrl);
|
||||
|
||||
try {
|
||||
// Use the streamText function from Vercel AI SDK core
|
||||
const stream = await openaiClient.chat.stream(messages, {
|
||||
// Updated: Use openaiClient.chat.stream
|
||||
model: modelId,
|
||||
max_tokens: maxTokens,
|
||||
temperature
|
||||
@@ -100,7 +105,6 @@ export async function streamOpenAIText(params) {
|
||||
'debug',
|
||||
`OpenAI streamText initiated successfully for model: ${modelId}`
|
||||
);
|
||||
// The Vercel SDK's streamText should directly return the stream object
|
||||
return stream;
|
||||
} catch (error) {
|
||||
log(
|
||||
@@ -117,7 +121,7 @@ export async function streamOpenAIText(params) {
|
||||
/**
|
||||
* Generates structured objects using OpenAI models via Vercel AI SDK.
|
||||
*
|
||||
* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
|
||||
* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature, baseUrl.
|
||||
* @returns {Promise<object>} The generated object matching the schema.
|
||||
* @throws {Error} If API call fails or object generation fails.
|
||||
*/
|
||||
@@ -129,7 +133,8 @@ export async function generateOpenAIObject(params) {
|
||||
schema,
|
||||
objectName,
|
||||
maxTokens,
|
||||
temperature
|
||||
temperature,
|
||||
baseUrl
|
||||
} = params;
|
||||
log(
|
||||
'debug',
|
||||
@@ -145,10 +150,9 @@ export async function generateOpenAIObject(params) {
|
||||
if (!objectName)
|
||||
throw new Error('Object name is required for OpenAI object generation.');
|
||||
|
||||
const openaiClient = createOpenAI({ apiKey });
|
||||
const openaiClient = getClient(apiKey, baseUrl);
|
||||
|
||||
try {
|
||||
// Use the imported generateObject function from 'ai' package
|
||||
const result = await generateObject({
|
||||
model: openaiClient(modelId),
|
||||
schema: schema,
|
||||
|
||||
@@ -2,6 +2,14 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
|
||||
import { generateText, streamText, generateObject } from 'ai';
|
||||
import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in scripts/modules
|
||||
|
||||
function getClient(apiKey, baseUrl) {
|
||||
if (!apiKey) throw new Error('OpenRouter API key is required.');
|
||||
return createOpenRouter({
|
||||
apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl })
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates text using an OpenRouter chat model.
|
||||
*
|
||||
@@ -11,6 +19,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in
|
||||
* @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
|
||||
* @param {number} [params.maxTokens] - Maximum tokens to generate.
|
||||
* @param {number} [params.temperature] - Sampling temperature.
|
||||
* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
|
||||
* @returns {Promise<string>} The generated text content.
|
||||
* @throws {Error} If the API call fails.
|
||||
*/
|
||||
@@ -20,6 +29,7 @@ async function generateOpenRouterText({
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
baseUrl,
|
||||
...rest // Capture any other Vercel AI SDK compatible parameters
|
||||
}) {
|
||||
if (!apiKey) throw new Error('OpenRouter API key is required.');
|
||||
@@ -28,7 +38,7 @@ async function generateOpenRouterText({
|
||||
throw new Error('Messages array cannot be empty.');
|
||||
|
||||
try {
|
||||
const openrouter = createOpenRouter({ apiKey });
|
||||
const openrouter = getClient(apiKey, baseUrl);
|
||||
const model = openrouter.chat(modelId); // Assuming chat model
|
||||
|
||||
const { text } = await generateText({
|
||||
@@ -58,6 +68,7 @@ async function generateOpenRouterText({
|
||||
* @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
|
||||
* @param {number} [params.maxTokens] - Maximum tokens to generate.
|
||||
* @param {number} [params.temperature] - Sampling temperature.
|
||||
* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
|
||||
* @returns {Promise<ReadableStream<string>>} A readable stream of text deltas.
|
||||
* @throws {Error} If the API call fails.
|
||||
*/
|
||||
@@ -67,6 +78,7 @@ async function streamOpenRouterText({
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
baseUrl,
|
||||
...rest
|
||||
}) {
|
||||
if (!apiKey) throw new Error('OpenRouter API key is required.');
|
||||
@@ -75,7 +87,7 @@ async function streamOpenRouterText({
|
||||
throw new Error('Messages array cannot be empty.');
|
||||
|
||||
try {
|
||||
const openrouter = createOpenRouter({ apiKey });
|
||||
const openrouter = getClient(apiKey, baseUrl);
|
||||
const model = openrouter.chat(modelId);
|
||||
|
||||
// Directly return the stream from the Vercel AI SDK function
|
||||
@@ -108,6 +120,7 @@ async function streamOpenRouterText({
|
||||
* @param {number} [params.maxRetries=3] - Max retries for object generation.
|
||||
* @param {number} [params.maxTokens] - Maximum tokens.
|
||||
* @param {number} [params.temperature] - Temperature.
|
||||
* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
|
||||
* @returns {Promise<object>} The generated object matching the schema.
|
||||
* @throws {Error} If the API call fails or validation fails.
|
||||
*/
|
||||
@@ -120,6 +133,7 @@ async function generateOpenRouterObject({
|
||||
maxRetries = 3,
|
||||
maxTokens,
|
||||
temperature,
|
||||
baseUrl,
|
||||
...rest
|
||||
}) {
|
||||
if (!apiKey) throw new Error('OpenRouter API key is required.');
|
||||
@@ -129,7 +143,7 @@ async function generateOpenRouterObject({
|
||||
throw new Error('Messages array cannot be empty.');
|
||||
|
||||
try {
|
||||
const openrouter = createOpenRouter({ apiKey });
|
||||
const openrouter = getClient(apiKey, baseUrl);
|
||||
const model = openrouter.chat(modelId);
|
||||
|
||||
const { object } = await generateObject({
|
||||
|
||||
@@ -10,13 +10,13 @@ import { log } from '../../scripts/modules/utils.js';
|
||||
|
||||
// --- Client Instantiation ---
|
||||
// Similar to Anthropic, this expects the resolved API key to be passed in.
|
||||
function getClient(apiKey) {
|
||||
function getClient(apiKey, baseUrl) {
|
||||
if (!apiKey) {
|
||||
throw new Error('Perplexity API key is required.');
|
||||
}
|
||||
// Create and return a new instance directly
|
||||
return createPerplexity({
|
||||
apiKey: apiKey
|
||||
apiKey: apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl })
|
||||
});
|
||||
}
|
||||
|
||||
@@ -31,6 +31,7 @@ function getClient(apiKey) {
|
||||
* @param {Array<object>} params.messages - The messages array.
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
|
||||
* @returns {Promise<string>} The generated text content.
|
||||
* @throws {Error} If the API call fails.
|
||||
*/
|
||||
@@ -39,11 +40,12 @@ export async function generatePerplexityText({
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature
|
||||
temperature,
|
||||
baseUrl
|
||||
}) {
|
||||
log('debug', `Generating Perplexity text with model: ${modelId}`);
|
||||
try {
|
||||
const client = getClient(apiKey);
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const result = await generateText({
|
||||
model: client(modelId),
|
||||
messages: messages,
|
||||
@@ -70,6 +72,7 @@ export async function generatePerplexityText({
|
||||
* @param {Array<object>} params.messages - The messages array.
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
|
||||
* @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
|
||||
* @throws {Error} If the API call fails to initiate the stream.
|
||||
*/
|
||||
@@ -78,11 +81,12 @@ export async function streamPerplexityText({
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature
|
||||
temperature,
|
||||
baseUrl
|
||||
}) {
|
||||
log('debug', `Streaming Perplexity text with model: ${modelId}`);
|
||||
try {
|
||||
const client = getClient(apiKey);
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const stream = await streamText({
|
||||
model: client(modelId),
|
||||
messages: messages,
|
||||
@@ -112,6 +116,7 @@ export async function streamPerplexityText({
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {number} [params.maxRetries] - Max retries for validation/generation.
|
||||
* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
|
||||
* @returns {Promise<object>} The generated object matching the schema.
|
||||
* @throws {Error} If generation or validation fails or is unsupported.
|
||||
*/
|
||||
@@ -123,7 +128,8 @@ export async function generatePerplexityObject({
|
||||
objectName = 'generated_object',
|
||||
maxTokens,
|
||||
temperature,
|
||||
maxRetries = 1 // Lower retries as support might be limited
|
||||
maxRetries = 1,
|
||||
baseUrl
|
||||
}) {
|
||||
log(
|
||||
'debug',
|
||||
@@ -134,8 +140,7 @@ export async function generatePerplexityObject({
|
||||
'generateObject support for Perplexity might be limited or experimental.'
|
||||
);
|
||||
try {
|
||||
const client = getClient(apiKey);
|
||||
// Attempt using generateObject, but be prepared for potential issues
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const result = await generateObject({
|
||||
model: client(modelId),
|
||||
schema: schema,
|
||||
|
||||
@@ -9,14 +9,13 @@ import { generateText, streamText, generateObject } from 'ai'; // Only import wh
|
||||
import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
|
||||
|
||||
// --- Client Instantiation ---
|
||||
function getClient(apiKey) {
|
||||
function getClient(apiKey, baseUrl) {
|
||||
if (!apiKey) {
|
||||
throw new Error('xAI API key is required.');
|
||||
}
|
||||
// Create and return a new instance directly
|
||||
return createXai({
|
||||
apiKey: apiKey
|
||||
// Add baseURL or other options if needed later
|
||||
apiKey: apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl })
|
||||
});
|
||||
}
|
||||
|
||||
@@ -31,6 +30,7 @@ function getClient(apiKey) {
|
||||
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {string} [params.baseUrl] - The base URL for the xAI API.
|
||||
* @returns {Promise<string>} The generated text content.
|
||||
* @throws {Error} If the API call fails.
|
||||
*/
|
||||
@@ -39,13 +39,14 @@ export async function generateXaiText({
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature
|
||||
temperature,
|
||||
baseUrl
|
||||
}) {
|
||||
log('debug', `Generating xAI text with model: ${modelId}`);
|
||||
try {
|
||||
const client = getClient(apiKey);
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const result = await generateText({
|
||||
model: client(modelId), // Correct model invocation
|
||||
model: client(modelId),
|
||||
messages: messages,
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature
|
||||
@@ -70,6 +71,7 @@ export async function generateXaiText({
|
||||
* @param {Array<object>} params.messages - The messages array.
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {string} [params.baseUrl] - The base URL for the xAI API.
|
||||
* @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
|
||||
* @throws {Error} If the API call fails to initiate the stream.
|
||||
*/
|
||||
@@ -78,18 +80,19 @@ export async function streamXaiText({
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature
|
||||
temperature,
|
||||
baseUrl
|
||||
}) {
|
||||
log('debug', `Streaming xAI text with model: ${modelId}`);
|
||||
try {
|
||||
const client = getClient(apiKey);
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const stream = await streamText({
|
||||
model: client(modelId), // Correct model invocation
|
||||
model: client(modelId),
|
||||
messages: messages,
|
||||
maxTokens: maxTokens,
|
||||
temperature: temperature
|
||||
});
|
||||
return stream; // Return the full stream object
|
||||
return stream;
|
||||
} catch (error) {
|
||||
log('error', `xAI streamText failed: ${error.message}`, error.stack);
|
||||
throw error;
|
||||
@@ -110,6 +113,7 @@ export async function streamXaiText({
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {number} [params.maxRetries] - Max retries for validation/generation.
|
||||
* @param {string} [params.baseUrl] - The base URL for the xAI API.
|
||||
* @returns {Promise<object>} The generated object matching the schema.
|
||||
* @throws {Error} If generation or validation fails.
|
||||
*/
|
||||
@@ -121,16 +125,17 @@ export async function generateXaiObject({
|
||||
objectName = 'generated_xai_object',
|
||||
maxTokens,
|
||||
temperature,
|
||||
maxRetries = 3
|
||||
maxRetries = 3,
|
||||
baseUrl
|
||||
}) {
|
||||
log(
|
||||
'warn', // Log warning as this is likely unsupported
|
||||
'warn',
|
||||
`Attempting to generate xAI object ('${objectName}') with model: ${modelId}. This may not be supported by the provider.`
|
||||
);
|
||||
try {
|
||||
const client = getClient(apiKey);
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const result = await generateObject({
|
||||
model: client(modelId), // Correct model invocation
|
||||
model: client(modelId),
|
||||
// Note: mode might need adjustment if xAI ever supports object generation differently
|
||||
mode: 'tool',
|
||||
schema: schema,
|
||||
@@ -153,6 +158,6 @@ export async function generateXaiObject({
|
||||
'error',
|
||||
`xAI generateObject ('${objectName}') failed: ${error.message}. (Likely unsupported by provider)`
|
||||
);
|
||||
throw error; // Re-throw the error
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
32
src/constants/task-status.js
Normal file
@@ -0,0 +1,32 @@
/**
 * @typedef {'pending' | 'done' | 'in-progress' | 'review' | 'deferred' | 'cancelled'} TaskStatus
 */

/**
 * Task status options list
 * @type {TaskStatus[]}
 * @description Defines possible task statuses:
 * - pending: Task waiting to start
 * - done: Task completed
 * - in-progress: Task in progress
 * - review: Task completed and waiting for review
 * - deferred: Task postponed or paused
 * - cancelled: Task cancelled and will not be completed
 */
export const TASK_STATUS_OPTIONS = [
	'pending',
	'done',
	'in-progress',
	'review',
	'deferred',
	'cancelled'
];

/**
 * Check if a given status is a valid task status
 * @param {string} status - The status to check
 * @returns {boolean} True if the status is valid, false otherwise
 */
export function isValidTaskStatus(status) {
	return TASK_STATUS_OPTIONS.includes(status);
}
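A brief consumption sketch for the new constants module (the relative import path varies by caller, e.g. scripts/modules/ui.js vs. the task-manager submodules above; userInput is a placeholder):

import {
	isValidTaskStatus,
	TASK_STATUS_OPTIONS
} from '../../src/constants/task-status.js';

if (!isValidTaskStatus(userInput)) {
	throw new Error(
		`Error: Invalid status value: ${userInput}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
	);
}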
35
src/utils/getVersion.js
Normal file
@@ -0,0 +1,35 @@
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import { log } from '../../scripts/modules/utils.js';

/**
 * Reads the version from the nearest package.json relative to this file.
 * Returns 'unknown' if not found or on error.
 * @returns {string} The version string or 'unknown'.
 */
export function getTaskMasterVersion() {
	let version = 'unknown';
	try {
		// Get the directory of the current module (getPackageVersion.js)
		const currentModuleFilename = fileURLToPath(import.meta.url);
		const currentModuleDirname = path.dirname(currentModuleFilename);
		// Construct the path to package.json relative to this file (../../package.json)
		const packageJsonPath = path.join(
			currentModuleDirname,
			'..',
			'..',
			'package.json'
		);

		if (fs.existsSync(packageJsonPath)) {
			const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8');
			const packageJson = JSON.parse(packageJsonContent);
			version = packageJson.version;
		}
	} catch (error) {
		// Silently fall back to default version
		log('warn', 'Could not read own package.json for version info.', error);
	}
	return version;
}
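Callers only need the one function; displayBanner above now uses it instead of reading package.json from process.cwd(), presumably so the reported version is Task Master's own rather than whatever package.json happens to sit in the user's working directory. A usage sketch:

import { getTaskMasterVersion } from '../../src/utils/getVersion.js';

const version = getTaskMasterVersion(); // e.g. '1.2.3', or 'unknown' on failure
console.log(`Task Master CLI version ${version}`);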
@@ -3,9 +3,8 @@
|
||||
*/
|
||||
|
||||
import { jest } from '@jest/globals';
|
||||
import path from 'path';
|
||||
import path, { dirname } from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
import { dirname } from 'path';
|
||||
|
||||
// Get the current module's directory
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
@@ -27,6 +26,7 @@ const mockReadJSON = jest.fn();
|
||||
const mockWriteJSON = jest.fn();
|
||||
const mockEnableSilentMode = jest.fn();
|
||||
const mockDisableSilentMode = jest.fn();
|
||||
const mockReadComplexityReport = jest.fn().mockReturnValue(null);
|
||||
|
||||
const mockGetAnthropicClient = jest.fn().mockReturnValue({});
|
||||
const mockGetConfiguredAnthropicClient = jest.fn().mockReturnValue({});
|
||||
@@ -130,6 +130,7 @@ jest.mock('../../../scripts/modules/utils.js', () => ({
|
||||
writeJSON: mockWriteJSON,
|
||||
enableSilentMode: mockEnableSilentMode,
|
||||
disableSilentMode: mockDisableSilentMode,
|
||||
readComplexityReport: mockReadComplexityReport,
|
||||
CONFIG: {
|
||||
model: 'claude-3-7-sonnet-20250219',
|
||||
maxTokens: 64000,
|
||||
@@ -160,15 +161,6 @@ jest.mock('../../../scripts/modules/task-manager.js', () => ({
|
||||
}));
|
||||
|
||||
// Import dependencies after mocks are set up
|
||||
import fs from 'fs';
|
||||
import {
|
||||
readJSON,
|
||||
writeJSON,
|
||||
enableSilentMode,
|
||||
disableSilentMode
|
||||
} from '../../../scripts/modules/utils.js';
|
||||
import { expandTask } from '../../../scripts/modules/task-manager.js';
|
||||
import { findTasksJsonPath } from '../../../mcp-server/src/core/utils/path-utils.js';
|
||||
import { sampleTasks } from '../../fixtures/sample-tasks.js';
|
||||
|
||||
// Mock logger
|
||||
@@ -220,6 +212,37 @@ describe('MCP Server Direct Functions', () => {
|
||||
});
|
||||
|
||||
describe('listTasksDirect', () => {
|
||||
// Sample complexity report for testing
|
||||
const mockComplexityReport = {
|
||||
meta: {
|
||||
generatedAt: '2025-03-24T20:01:35.986Z',
|
||||
tasksAnalyzed: 3,
|
||||
thresholdScore: 5,
|
||||
projectName: 'Test Project',
|
||||
usedResearch: false
|
||||
},
|
||||
complexityAnalysis: [
|
||||
{
|
||||
taskId: 1,
|
||||
taskTitle: 'Initialize Project',
|
||||
complexityScore: 3,
|
||||
recommendedSubtasks: 2
|
||||
},
|
||||
{
|
||||
taskId: 2,
|
||||
taskTitle: 'Create Core Functionality',
|
||||
complexityScore: 8,
|
||||
recommendedSubtasks: 5
|
||||
},
|
||||
{
|
||||
taskId: 3,
|
||||
taskTitle: 'Implement UI Components',
|
||||
complexityScore: 6,
|
||||
recommendedSubtasks: 4
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
// Test wrapper function that doesn't rely on the actual implementation
|
||||
async function testListTasks(args, mockLogger) {
|
||||
// File not found case
|
||||
@@ -235,21 +258,35 @@ describe('MCP Server Direct Functions', () => {
|
||||
};
|
||||
}
|
||||
|
||||
// Check for complexity report
|
||||
const complexityReport = mockReadComplexityReport();
|
||||
let tasksData = [...sampleTasks.tasks];
|
||||
|
||||
// Add complexity scores if report exists
|
||||
if (complexityReport && complexityReport.complexityAnalysis) {
|
||||
tasksData = tasksData.map((task) => {
|
||||
const analysis = complexityReport.complexityAnalysis.find(
|
||||
(a) => a.taskId === task.id
|
||||
);
|
||||
if (analysis) {
|
||||
return { ...task, complexityScore: analysis.complexityScore };
|
||||
}
|
||||
return task;
|
||||
});
|
||||
}
|
||||
|
||||
// Success case
|
||||
if (!args.status && !args.withSubtasks) {
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
tasks: sampleTasks.tasks,
|
||||
tasks: tasksData,
|
||||
stats: {
|
||||
total: sampleTasks.tasks.length,
|
||||
completed: sampleTasks.tasks.filter((t) => t.status === 'done')
|
||||
total: tasksData.length,
|
||||
completed: tasksData.filter((t) => t.status === 'done').length,
|
||||
inProgress: tasksData.filter((t) => t.status === 'in-progress')
|
||||
.length,
|
||||
inProgress: sampleTasks.tasks.filter(
|
||||
(t) => t.status === 'in-progress'
|
||||
).length,
|
||||
pending: sampleTasks.tasks.filter((t) => t.status === 'pending')
|
||||
.length
|
||||
pending: tasksData.filter((t) => t.status === 'pending').length
|
||||
}
|
||||
},
|
||||
fromCache: false
|
||||
@@ -258,16 +295,14 @@ describe('MCP Server Direct Functions', () => {
|
||||
|
||||
// Status filter case
|
||||
if (args.status) {
|
||||
const filteredTasks = sampleTasks.tasks.filter(
|
||||
(t) => t.status === args.status
|
||||
);
|
||||
const filteredTasks = tasksData.filter((t) => t.status === args.status);
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
tasks: filteredTasks,
|
||||
filter: args.status,
|
||||
stats: {
|
||||
total: sampleTasks.tasks.length,
|
||||
total: tasksData.length,
|
||||
filtered: filteredTasks.length
|
||||
}
|
||||
},
|
||||
@@ -280,10 +315,10 @@ describe('MCP Server Direct Functions', () => {
|
||||
return {
|
||||
success: true,
|
||||
data: {
|
||||
tasks: sampleTasks.tasks,
|
||||
tasks: tasksData,
|
||||
includeSubtasks: true,
|
||||
stats: {
|
||||
total: sampleTasks.tasks.length
|
||||
total: tasksData.length
|
||||
}
|
||||
},
|
||||
fromCache: false
|
||||
@@ -370,6 +405,29 @@ describe('MCP Server Direct Functions', () => {
|
||||
expect(result.error.code).toBe('FILE_NOT_FOUND_ERROR');
|
||||
expect(mockLogger.error).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should include complexity scores when complexity report exists', async () => {
|
||||
// Arrange
|
||||
mockReadComplexityReport.mockReturnValueOnce(mockComplexityReport);
|
||||
const args = {
|
||||
projectRoot: testProjectRoot,
|
||||
file: testTasksPath,
|
||||
withSubtasks: true
|
||||
};
|
||||
|
||||
// Act
|
||||
const result = await testListTasks(args, mockLogger);
|
||||
// Assert
|
||||
expect(result.success).toBe(true);
|
||||
|
||||
// Check that tasks have complexity scores from the report
|
||||
mockComplexityReport.complexityAnalysis.forEach((analysis) => {
|
||||
const task = result.data.tasks.find((t) => t.id === analysis.taskId);
|
||||
if (task) {
|
||||
expect(task.complexityScore).toBe(analysis.complexityScore);
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('expandTaskDirect', () => {
|
||||
|
||||
@@ -9,7 +9,7 @@ process.env.MODEL = 'sonar-pro';
|
||||
process.env.MAX_TOKENS = '64000';
|
||||
process.env.TEMPERATURE = '0.2';
|
||||
process.env.DEBUG = 'false';
|
||||
process.env.LOG_LEVEL = 'error'; // Set to error to reduce noise in tests
|
||||
process.env.TASKMASTER_LOG_LEVEL = 'error'; // Set to error to reduce noise in tests
|
||||
process.env.DEFAULT_SUBTASKS = '5';
|
||||
process.env.DEFAULT_PRIORITY = 'medium';
|
||||
process.env.PROJECT_NAME = 'Test Project';
|
||||
|
||||
@@ -8,6 +8,7 @@ const mockGetResearchModelId = jest.fn();
|
||||
const mockGetFallbackProvider = jest.fn();
|
||||
const mockGetFallbackModelId = jest.fn();
|
||||
const mockGetParametersForRole = jest.fn();
|
||||
const mockGetBaseUrlForRole = jest.fn();
|
||||
|
||||
jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
|
||||
getMainProvider: mockGetMainProvider,
|
||||
@@ -16,7 +17,8 @@ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
|
||||
getResearchModelId: mockGetResearchModelId,
|
||||
getFallbackProvider: mockGetFallbackProvider,
|
||||
getFallbackModelId: mockGetFallbackModelId,
|
||||
getParametersForRole: mockGetParametersForRole
|
||||
getParametersForRole: mockGetParametersForRole,
|
||||
getBaseUrlForRole: mockGetBaseUrlForRole
|
||||
}));
|
||||
|
||||
// Mock AI Provider Modules
|
||||
|
||||
@@ -2,8 +2,9 @@
|
||||
* Task finder tests
|
||||
*/
|
||||
|
||||
// Import after mocks are set up - No mocks needed for readComplexityReport anymore
|
||||
import { findTaskById } from '../../scripts/modules/utils.js';
|
||||
import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';
|
||||
import { emptySampleTasks, sampleTasks } from '../fixtures/sample-tasks.js';
|
||||
|
||||
describe('Task Finder', () => {
|
||||
describe('findTaskById function', () => {
|
||||
@@ -55,5 +56,62 @@ describe('Task Finder', () => {
|
||||
expect(result.task).toBeNull();
|
||||
expect(result.originalSubtaskCount).toBeNull();
|
||||
});
|
||||
test('should work correctly when no complexity report is provided', () => {
|
||||
// Pass null as the complexity report
|
||||
const result = findTaskById(sampleTasks.tasks, 2, null);
|
||||
|
||||
expect(result.task).toBeDefined();
|
||||
expect(result.task.id).toBe(2);
|
||||
expect(result.task.complexityScore).toBeUndefined();
|
||||
});
|
||||
test('should work correctly when task has no complexity data in the provided report', () => {
|
||||
// Define a complexity report that doesn't include task 2
|
||||
const complexityReport = {
|
||||
complexityAnalysis: [{ taskId: 999, complexityScore: 5 }]
|
||||
};
|
||||
|
||||
const result = findTaskById(sampleTasks.tasks, 2, complexityReport);
|
||||
|
||||
expect(result.task).toBeDefined();
|
||||
expect(result.task.id).toBe(2);
|
||||
expect(result.task.complexityScore).toBeUndefined();
|
||||
});
|
||||
|
||||
test('should include complexity score when report is provided', () => {
|
||||
// Define the complexity report for this test
|
||||
const complexityReport = {
|
||||
meta: {
|
||||
generatedAt: '2023-01-01T00:00:00.000Z',
|
||||
tasksAnalyzed: 3,
|
||||
thresholdScore: 5
|
||||
},
|
||||
complexityAnalysis: [
|
||||
{
|
||||
taskId: 1,
|
||||
taskTitle: 'Initialize Project',
|
||||
complexityScore: 3,
|
||||
recommendedSubtasks: 2
|
||||
},
|
||||
{
|
||||
taskId: 2,
|
||||
taskTitle: 'Create Core Functionality',
|
||||
complexityScore: 8,
|
||||
recommendedSubtasks: 5
|
||||
},
|
||||
{
|
||||
taskId: 3,
|
||||
taskTitle: 'Implement UI Components',
|
||||
complexityScore: 6,
|
||||
recommendedSubtasks: 4
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
const result = findTaskById(sampleTasks.tasks, 2, complexityReport);
|
||||
|
||||
expect(result.task).toBeDefined();
|
||||
expect(result.task.id).toBe(2);
|
||||
expect(result.task.complexityScore).toBe(8);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -199,6 +199,12 @@ const testSetTaskStatus = (tasksData, taskIdInput, newStatus) => {
|
||||
|
||||
// Simplified version of updateSingleTaskStatus for testing
|
||||
const testUpdateSingleTaskStatus = (tasksData, taskIdInput, newStatus) => {
|
||||
if (!isValidTaskStatus(newStatus)) {
|
||||
throw new Error(
|
||||
`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
|
||||
);
|
||||
}
|
||||
|
||||
// Check if it's a subtask (e.g., "1.2")
|
||||
if (taskIdInput.includes('.')) {
|
||||
const [parentId, subtaskId] = taskIdInput
|
||||
@@ -329,6 +335,10 @@ const testAddTask = (
|
||||
import * as taskManager from '../../scripts/modules/task-manager.js';
|
||||
import { sampleClaudeResponse } from '../fixtures/sample-claude-response.js';
|
||||
import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';
|
||||
import {
|
||||
isValidTaskStatus,
|
||||
TASK_STATUS_OPTIONS
|
||||
} from '../../src/constants/task-status.js';
|
||||
|
||||
// Destructure the required functions for convenience
|
||||
const { findNextTask, generateTaskFiles, clearSubtasks, updateTaskById } =
|
||||
@@ -1165,6 +1175,16 @@ describe('Task Manager Module', () => {
|
||||
expect(testTasksData.tasks[1].status).toBe('done');
|
||||
});
|
||||
|
||||
test('should throw error for invalid status', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
|
||||
// Assert
|
||||
expect(() =>
|
||||
testUpdateSingleTaskStatus(testTasksData, '2', 'Done')
|
||||
).toThrow(/Error: Invalid status value: Done./);
|
||||
});
|
||||
|
||||
test('should update subtask status', async () => {
|
||||
// Arrange
|
||||
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
|
||||
|
||||