Compare commits: v0.13.0-rc...v0.14.0-rc (27 commits)

| SHA1 |
|---|
| 58b417a8ce |
| a8dabf4485 |
| da317f2607 |
| ed17cb0e0a |
| e96734a6cc |
| 17294ff259 |
| a96215a359 |
| 0a611843b5 |
| a1f8d52474 |
| c47deeb869 |
| dd90c9cb5d |
| c7042845d6 |
| efce37469b |
| 4117f71c18 |
| 09d839fff5 |
| 90068348d3 |
| 02e347d2d7 |
| 0527c363e3 |
| 735135efe9 |
| 4fee667a05 |
| 01963af2cb |
| 0633895f3b |
| 10442c1119 |
| 734a4fdcfc |
| 8dace2186c |
| 095e373843 |
| 0bc9bac392 |
`.changeset/beige-doodles-type.md` (new file, +5)

```md
---
'task-master-ai': patch
---

Resolve all issues related to MCP
```

Deleted changeset (`@@ -1,5 +0,0 @@`)

```md
---
'task-master-ai': patch
---

- Add support for Google Gemini models via Vercel AI SDK integration.
```

Deleted changeset (`@@ -1,5 +0,0 @@`)

```md
---
'task-master-ai': patch
---

Add xAI provider and Grok models support
```
Deleted changeset (`@@ -1,8 +0,0 @@`)

```md
---
'task-master-ai': minor
---

feat(expand): Enhance `expand` and `expand-all` commands

- Integrate `task-complexity-report.json` to automatically determine the number of subtasks and use tailored prompts for expansion based on prior analysis. You no longer need to try copy-pasting the recommended prompt. If it exists, it will use it for you. You can just run `task-master update --id=[id of task] --research` and it will use that prompt automatically. No extra prompt needed.
- Change default behavior to *append* new subtasks to existing ones. Use the `--force` flag to clear existing subtasks before expanding. This is helpful if you need to add more subtasks to a task but you want to do it by the batch from a given prompt. Use force if you want to start fresh with a task's subtasks.
```
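A minimal CLI sketch of the flow described in the changeset above. The task ID is hypothetical and exact flag spellings should be confirmed with `task-master --help`; only `--force` and `--research` are taken directly from the text.

```bash
# If task-complexity-report.json exists, its recommended prompt and subtask
# count are applied automatically - no copy-pasting needed.
task-master update --id=12 --research

# Expand tasks; new subtasks are appended to existing ones by default.
task-master expand-all

# Start fresh instead: clear existing subtasks before expanding.
task-master expand-all --force
```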
Deleted changeset (`@@ -1,9 +0,0 @@`)

```md
---
'task-master-ai': patch
---

Better support for file paths on Windows, Linux & WSL.

- Standardizes handling of different path formats (URI encoded, Windows, Linux, WSL).
- Ensures tools receive a clean, absolute path suitable for the server OS.
- Simplifies tool implementation by centralizing normalization logic.
```

Deleted changeset (`@@ -1,7 +0,0 @@`)

```md
---
'task-master-ai': minor
---

Adds support for the OpenRouter AI provider. Users can now configure models available through OpenRouter (requiring an `OPENROUTER_API_KEY`) via the `task-master models` command, granting access to a wide range of additional LLMs.

- IMPORTANT FYI ABOUT OPENROUTER: Taskmaster relies on AI SDK, which itself relies on tool use. It looks like **free** models sometimes do not include tool use. For example, Gemini 2.5 pro (free) failed via OpenRouter (no tool use) but worked fine on the paid version of the model. Custom model support for Open Router is considered experimental and likely will not be further improved for some time.
```

Deleted changeset (`@@ -1,8 +0,0 @@`)

```md
---
'task-master-ai': patch
---

Improved update-subtask

- Now it has context about the parent task details
- It also has context about the subtask before it and the subtask after it (if they exist)
- Not passing all subtasks to stay token efficient
```

Deleted changeset (`@@ -1,13 +0,0 @@`)

```md
---
'task-master-ai': patch
---

Improve and adjust `init` command for robustness and updated dependencies.

- **Update Initialization Dependencies:** Ensure newly initialized projects (`task-master init`) include all required AI SDK dependencies (`@ai-sdk/*`, `ai`, provider wrappers) in their `package.json` for out-of-the-box AI feature compatibility. Remove unnecessary dependencies (e.g., `uuid`) from the init template.
- **Silence `npm install` during `init`:** Prevent `npm install` output from interfering with non-interactive/MCP initialization by suppressing its stdio in silent mode.
- **Improve Conditional Model Setup:** Reliably skip interactive `models --setup` during non-interactive `init` runs (e.g., `init -y` or MCP) by checking `isSilentMode()` instead of passing flags.
- **Refactor `init.js`:** Remove internal `isInteractive` flag logic.
- **Update `init` Instructions:** Tweak the "Getting Started" text displayed after `init`.
- **Fix MCP Server Launch:** Update `.cursor/mcp.json` template to use `node ./mcp-server/server.js` instead of `npx task-master-mcp`.
- **Update Default Model:** Change the default main model in the `.taskmasterconfig` template.
```
`.changeset/floppy-plants-marry.md` (new file, +9)

```md
---
'task-master-ai': patch
---

Fix CLI --force flag for parse-prd command

Previously, the --force flag was not respected when running `parse-prd`, causing the command to prompt for confirmation or fail even when --force was provided. This patch ensures that the flag is correctly passed and handled, allowing users to overwrite existing tasks.json files as intended.

- Fixes #477
```
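A usage sketch for the fix above; the PRD file name is illustrative and the exact input argument may differ in your setup, but `--force` is the flag this patch repairs.

```bash
# Re-parse a PRD and overwrite the existing tasks.json without being prompted.
task-master parse-prd my-prd.txt --force
```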
`.changeset/forty-plums-stay.md` (new file, +5)

```md
---
'task-master-ai': minor
---

.taskmasterconfig now supports a baseUrl field per model role (main, research, fallback), allowing endpoint overrides for any provider.
```

Deleted changeset (`@@ -1,5 +0,0 @@`)

```md
---
'task-master-ai': patch
---

Fixes an issue with add-task which did not use the manually defined properties and still needlessly hit the AI endpoint.
```
`.changeset/many-wasps-sell.md` (new file, +5)

```md
---
'task-master-ai': patch
---

Task Master no longer tells you to update when you're already up to date
```

Deleted changeset (`@@ -1,5 +0,0 @@`)

```md
---
'task-master-ai': minor
---

Adds model management and new configuration file .taskmasterconfig which houses the models used for main, research and fallback. Adds models command and setter flags. Adds a --setup flag with an interactive setup. We should be calling this during init. Shows a table of active and available models when models is called without flags. Includes SWE scores and token costs, which are manually entered into the supported_models.json, the new place where models are defined for support. Config-manager.js is the core module responsible for managing the new config.
```

Deleted changeset (`@@ -1,5 +0,0 @@`)

```md
---
'task-master-ai': patch
---

Fixes an issue that prevented remove-subtask with comma separated tasks/subtasks from being deleted (only the first ID was being deleted). Closes #140
```

Deleted changeset (`@@ -1,10 +0,0 @@`)

```md
---
'task-master-ai': patch
---

Improves next command to be subtask-aware

- The logic for determining the "next task" (findNextTask function, used by task-master next and the next_task MCP tool) has been significantly improved. Previously, it only considered top-level tasks, making its recommendation less useful when a parent task containing subtasks was already marked 'in-progress'.
- The updated logic now prioritizes finding the next available subtask within any 'in-progress' parent task, considering subtask dependencies and priority.
- If no suitable subtask is found within active parent tasks, it falls back to recommending the next eligible top-level task based on the original criteria (status, dependencies, priority).

This change makes the next command much more relevant and helpful during the implementation phase of complex tasks.
```
Deleted changeset (`@@ -1,11 +0,0 @@`)

```md
---
'task-master-ai': minor
---

Adds custom model ID support for Ollama and OpenRouter providers.

- Adds the `--ollama` and `--openrouter` flags to the `task-master models --set-<role>` command to set models for those providers outside of the supported models list.
- Updated `task-master models --setup` interactive mode with options to explicitly enter custom Ollama or OpenRouter model IDs.
- Implemented live validation against the OpenRouter API (`/api/v1/models`) when setting a custom OpenRouter model ID (via flag or setup).
- Refined logic to prioritize explicit provider flags/choices over internal model list lookups in case of ID conflicts.
- Added warnings when setting custom/unvalidated models.
- We obviously don't recommend going with a custom, unproven model. If you do and find performance is good, please let us know so we can add it to the list of supported models.
```
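An illustrative invocation of the new flags; the model IDs are placeholders and `--set-main`/`--set-research` stand in for the `--set-<role>` pattern named above, so check the exact syntax against the CLI help.

```bash
# Custom OpenRouter model for the main role (validated live against /api/v1/models).
OPENROUTER_API_KEY="your-key-here" task-master models --set-main "vendor/custom-model" --openrouter

# Custom locally served Ollama model for the research role.
task-master models --set-research "my-local-model" --ollama
```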
Deleted changeset (`@@ -1,5 +0,0 @@`)

```md
---
'task-master-ai': patch
---

Add `--status` flag to `show` command to filter displayed subtasks.
```
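A hedged example of the new flag; the task identifier is hypothetical, and the way `show` receives the task ID may differ from what is sketched here.

```bash
# Show a task but only display its pending subtasks.
task-master show --id=12 --status=pending
```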
`.changeset/pre.json` (new file, +12)

```json
{
  "mode": "exit",
  "tag": "rc",
  "initialVersions": {
    "task-master-ai": "0.13.2"
  },
  "changesets": [
    "beige-doodles-type",
    "red-oranges-attend",
    "red-suns-wash"
  ]
}
```
Deleted changeset (`@@ -1,7 +0,0 @@`)

```md
---
'task-master-ai': minor
---

Integrate OpenAI as a new AI provider.

- Enhance `models` command/tool to display API key status.
- Implement model-specific `maxTokens` override based on `supported-models.json` to save you if you use an incorrect max token value.
```

`.changeset/red-oranges-attend.md` (new file, +5)

```md
---
'task-master-ai': patch
---

Fix ERR_MODULE_NOT_FOUND when trying to run MCP Server
```

Modified changeset

```diff
@@ -2,4 +2,4 @@
 'task-master-ai': patch
 ---

-Add integration for Roo Code
+Add src directory to exports
```
`.changeset/sharp-dingos-melt.md` (new file, +5)

```md
---
'task-master-ai': patch
---

Fix the error handling of task status settings
```

`.changeset/six-cloths-happen.md` (new file, +7)

```md
---
'task-master-ai': patch
---

Remove caching layer from MCP direct functions for task listing, next task, and complexity report

- Fixes issues users were having where they were getting stale data
```
`.changeset/slow-singers-swim.md` (new file, +5)

```md
---
'task-master-ai': patch
---

Fix for issue #409 LOG_LEVEL Pydantic validation error
```

`.changeset/social-masks-fold.md` (new file, +5)

```md
---
'task-master-ai': minor
---

Display task complexity scores in task lists, next task, and task details views.
```

`.changeset/soft-zoos-flow.md` (new file, +7)

```md
---
'task-master-ai': patch
---

Fix initial .env.example to work out of the box

- Closes #419
```

`.changeset/ten-ways-mate.md` (new file, +5)

```md
---
'task-master-ai': patch
---

Fix default fallback model and maxTokens in Taskmaster initialization
```
Deleted changeset (`@@ -1,9 +0,0 @@`)

```md
---
'task-master-ai': minor
---

Tweaks Perplexity AI calls for research mode to max out input tokens and get day-fresh information

- Forces temp at 0.1 for highly deterministic output, no variations
- Adds a system prompt to further improve the output
- Correctly uses the maximum input tokens (8,719, used 8,700) for Perplexity
- Specifies to use a high degree of research across the web
- Specifies to use information that is as fresh as today; this supports stuff like capturing brand new announcements like new GPT models and being able to query for those in research. 🔥
```
`.changeset/tricky-wombats-spend.md` (new file, +5)

```md
---
'task-master-ai': patch
---

Fix bug when updating tasks on the MCP server (#412)
```

Deleted changeset (`@@ -1,5 +0,0 @@`)

```md
---
'task-master-ai': patch
---

Fix --task to --num-tasks in ui + related tests - issue #324
```
Deleted changeset (`@@ -1,9 +0,0 @@`)

```md
---
'task-master-ai': patch
---

Adds a 'models' CLI and MCP command to get the current model configuration, available models, and gives the ability to set main/research/fallback models.

- In the CLI, `task-master models` shows the current models config. Using the `--setup` flag launches an interactive setup that allows you to easily select the models you want to use for each of the three roles. Use `q` during the interactive setup to cancel the setup.
- In the MCP, responses are simplified in RESTful format (instead of the full CLI output). The agent can use the `models` tool with different arguments, including `listAvailableModels` to get available models. Run without arguments, it will return the current configuration. Arguments are available to set the model for each of the three roles. This allows you to manage Taskmaster AI providers and models directly from either the CLI or MCP or both.
- Updated the CLI help menu when you run `task-master` to include missing commands and .taskmasterconfig information.
- Adds `--research` flag to `add-task` so you can hit up Perplexity right from the add-task flow, rather than having to add a task and then update it.
```
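A short sketch of the CLI surface described above; the prompt text is illustrative, and the exact `add-task` argument names should be checked against the CLI help.

```bash
# Show the current main/research/fallback configuration and available models.
task-master models

# Launch the interactive model setup (press q to cancel).
task-master models --setup

# Add a task and pull in research (e.g., Perplexity) in the same step.
task-master add-task --research --prompt="Investigate rate limiting for the public API"
```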
`.changeset/wide-eyes-relax.md` (new file, +11)

```md
---
'task-master-ai': patch
---

Fix duplicate output on CLI help screen

- Prevent the Task Master CLI from printing the help screen more than once when using `-h` or `--help`.
- Removed redundant manual event handlers and guards for help output; now only the Commander `.helpInformation` override is used for custom help.
- Simplified logic so that help is only shown once for both "no arguments" and help flag flows.
- Ensures a clean, branded help experience with no repeated content.
- Fixes #339
```
Documentation: MCP/Cursor configuration (`LOG_LEVEL` renamed to `TASKMASTER_LOG_LEVEL`)

```diff
@@ -116,7 +116,7 @@ Taskmaster configuration is managed through two main mechanisms:
 * For MCP/Cursor integration, configure these keys in the `env` section of `.cursor/mcp.json`.
 * Available keys/variables: See `assets/env.example` or the Configuration section in the command reference (previously linked to `taskmaster.mdc`).

-**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool.
+**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `TASKMASTER_LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool.
 **If AI commands FAIL in MCP** verify that the API key for the selected provider is present in the `env` section of `.cursor/mcp.json`.
 **If AI commands FAIL in CLI** verify that the API key for the selected provider is present in the `.env` file in the root of the project.
```
`.github/workflows/pre-release.yml` (new file, +62, vendored)

```yaml
name: Pre-Release (RC)

on:
  workflow_dispatch: # Allows manual triggering from GitHub UI/API

concurrency: pre-release-${{ github.ref }}

jobs:
  rc:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm'

      - name: Cache node_modules
        uses: actions/cache@v4
        with:
          path: |
            node_modules
            */*/node_modules
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-

      - name: Install dependencies
        run: npm ci
        timeout-minutes: 2

      - name: Enter RC mode
        run: |
          npx changeset pre exit || true
          npx changeset pre enter rc

      - name: Version RC packages
        run: npx changeset version
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          NPM_TOKEN: ${{ secrets.NPM_TOKEN }}

      - name: Create Release Candidate Pull Request or Publish Release Candidate to npm
        uses: changesets/action@v1
        with:
          publish: npm run release
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          NPM_TOKEN: ${{ secrets.NPM_TOKEN }}

      - name: Exit RC mode
        run: npx changeset pre exit

      - name: Commit & Push changes
        uses: actions-js/push@master
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          branch: ${{ github.ref }}
          message: 'chore: rc version bump'
```
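For reference, the RC sequence this workflow drives can be reproduced locally with the same Changesets commands it runs (a sketch only; publishing itself should normally stay in CI):

```bash
# Leave any previous pre-release state, then enter release-candidate mode.
npx changeset pre exit || true
npx changeset pre enter rc

# Consume pending changesets and bump versions with the rc tag.
npx changeset version

# Leave RC mode when finished.
npx changeset pre exit
```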
`.github/workflows/release.yml` (vendored, +3)

```diff
@@ -33,6 +33,9 @@ jobs:
         run: npm ci
         timeout-minutes: 2

+      - name: Exit pre-release mode (safety check)
+        run: npx changeset pre exit || true
+
       - name: Create Release Pull Request or Publish to npm
         uses: changesets/action@v1
         with:
```
`.gitignore` (vendored, +3)

```diff
@@ -61,3 +61,6 @@ dist
 *.debug
 init-debug.log
 dev-debug.log
+
+# NPMRC
+.npmrc
```
`CHANGELOG.md` (+72)

```md
# task-master-ai

## 0.13.1

### Patch Changes

- [#399](https://github.com/eyaltoledano/claude-task-master/pull/399) [`734a4fd`](https://github.com/eyaltoledano/claude-task-master/commit/734a4fdcfc89c2e089255618cf940561ad13a3c8) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix ERR_MODULE_NOT_FOUND when trying to run MCP Server

## 0.13.0

### Minor Changes

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`ef782ff`](https://github.com/eyaltoledano/claude-task-master/commit/ef782ff5bd4ceb3ed0dc9ea82087aae5f79ac933) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - feat(expand): Enhance `expand` and `expand-all` commands

  - Integrate `task-complexity-report.json` to automatically determine the number of subtasks and use tailored prompts for expansion based on prior analysis. You no longer need to try copy-pasting the recommended prompt. If it exists, it will use it for you. You can just run `task-master update --id=[id of task] --research` and it will use that prompt automatically. No extra prompt needed.
  - Change default behavior to _append_ new subtasks to existing ones. Use the `--force` flag to clear existing subtasks before expanding. This is helpful if you need to add more subtasks to a task but you want to do it by the batch from a given prompt. Use force if you want to start fresh with a task's subtasks.

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`87d97bb`](https://github.com/eyaltoledano/claude-task-master/commit/87d97bba00d84e905756d46ef96b2d5b984e0f38) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Adds support for the OpenRouter AI provider. Users can now configure models available through OpenRouter (requiring an `OPENROUTER_API_KEY`) via the `task-master models` command, granting access to a wide range of additional LLMs. - IMPORTANT FYI ABOUT OPENROUTER: Taskmaster relies on AI SDK, which itself relies on tool use. It looks like **free** models sometimes do not include tool use. For example, Gemini 2.5 pro (free) failed via OpenRouter (no tool use) but worked fine on the paid version of the model. Custom model support for Open Router is considered experimental and likely will not be further improved for some time.

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`1ab836f`](https://github.com/eyaltoledano/claude-task-master/commit/1ab836f191cb8969153593a9a0bd47fc9aa4a831) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Adds model management and new configuration file .taskmasterconfig which houses the models used for main, research and fallback. Adds models command and setter flags. Adds a --setup flag with an interactive setup. We should be calling this during init. Shows a table of active and available models when models is called without flags. Includes SWE scores and token costs, which are manually entered into the supported_models.json, the new place where models are defined for support. Config-manager.js is the core module responsible for managing the new config.

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`c8722b0`](https://github.com/eyaltoledano/claude-task-master/commit/c8722b0a7a443a73b95d1bcd4a0b68e0fce2a1cd) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Adds custom model ID support for Ollama and OpenRouter providers.

  - Adds the `--ollama` and `--openrouter` flags to the `task-master models --set-<role>` command to set models for those providers outside of the supported models list.
  - Updated `task-master models --setup` interactive mode with options to explicitly enter custom Ollama or OpenRouter model IDs.
  - Implemented live validation against the OpenRouter API (`/api/v1/models`) when setting a custom OpenRouter model ID (via flag or setup).
  - Refined logic to prioritize explicit provider flags/choices over internal model list lookups in case of ID conflicts.
  - Added warnings when setting custom/unvalidated models.
  - We obviously don't recommend going with a custom, unproven model. If you do and find performance is good, please let us know so we can add it to the list of supported models.

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`2517bc1`](https://github.com/eyaltoledano/claude-task-master/commit/2517bc112c9a497110f3286ca4bfb4130c9addcb) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Integrate OpenAI as a new AI provider. - Enhance `models` command/tool to display API key status. - Implement model-specific `maxTokens` override based on `supported-models.json` to save you if you use an incorrect max token value.

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`9a48278`](https://github.com/eyaltoledano/claude-task-master/commit/9a482789f7894f57f655fb8d30ba68542bd0df63) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Tweaks Perplexity AI calls for research mode to max out input tokens and get day-fresh information - Forces temp at 0.1 for highly deterministic output, no variations - Adds a system prompt to further improve the output - Correctly uses the maximum input tokens (8,719, used 8,700) for Perplexity - Specifies to use a high degree of research across the web - Specifies to use information that is as fresh as today; this supports stuff like capturing brand new announcements like new GPT models and being able to query for those in research. 🔥

### Patch Changes

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`842eaf7`](https://github.com/eyaltoledano/claude-task-master/commit/842eaf722498ddf7307800b4cdcef4ac4fd7e5b0) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Add support for Google Gemini models via Vercel AI SDK integration.

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`ed79d4f`](https://github.com/eyaltoledano/claude-task-master/commit/ed79d4f4735dfab4124fa189214c0bd5e23a6860) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Add xAI provider and Grok models support

- [#378](https://github.com/eyaltoledano/claude-task-master/pull/378) [`ad89253`](https://github.com/eyaltoledano/claude-task-master/commit/ad89253e313a395637aa48b9f92cc39b1ef94ad8) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Better support for file paths on Windows, Linux & WSL.

  - Standardizes handling of different path formats (URI encoded, Windows, Linux, WSL).
  - Ensures tools receive a clean, absolute path suitable for the server OS.
  - Simplifies tool implementation by centralizing normalization logic.

- [#285](https://github.com/eyaltoledano/claude-task-master/pull/285) [`2acba94`](https://github.com/eyaltoledano/claude-task-master/commit/2acba945c0afee9460d8af18814c87e80f747e9f) Thanks [@neno-is-ooo](https://github.com/neno-is-ooo)! - Add integration for Roo Code

- [#378](https://github.com/eyaltoledano/claude-task-master/pull/378) [`d63964a`](https://github.com/eyaltoledano/claude-task-master/commit/d63964a10eed9be17856757661ff817ad6bacfdc) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Improved update-subtask - Now it has context about the parent task details - It also has context about the subtask before it and the subtask after it (if they exist) - Not passing all subtasks to stay token efficient

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`5f504fa`](https://github.com/eyaltoledano/claude-task-master/commit/5f504fafb8bdaa0043c2d20dee8bbb8ec2040d85) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Improve and adjust `init` command for robustness and updated dependencies.

  - **Update Initialization Dependencies:** Ensure newly initialized projects (`task-master init`) include all required AI SDK dependencies (`@ai-sdk/*`, `ai`, provider wrappers) in their `package.json` for out-of-the-box AI feature compatibility. Remove unnecessary dependencies (e.g., `uuid`) from the init template.
  - **Silence `npm install` during `init`:** Prevent `npm install` output from interfering with non-interactive/MCP initialization by suppressing its stdio in silent mode.
  - **Improve Conditional Model Setup:** Reliably skip interactive `models --setup` during non-interactive `init` runs (e.g., `init -y` or MCP) by checking `isSilentMode()` instead of passing flags.
  - **Refactor `init.js`:** Remove internal `isInteractive` flag logic.
  - **Update `init` Instructions:** Tweak the "Getting Started" text displayed after `init`.
  - **Fix MCP Server Launch:** Update `.cursor/mcp.json` template to use `node ./mcp-server/server.js` instead of `npx task-master-mcp`.
  - **Update Default Model:** Change the default main model in the `.taskmasterconfig` template.

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`96aeeff`](https://github.com/eyaltoledano/claude-task-master/commit/96aeeffc195372722c6a07370540e235bfe0e4d8) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Fixes an issue with add-task which did not use the manually defined properties and still needlessly hit the AI endpoint.

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`5aea93d`](https://github.com/eyaltoledano/claude-task-master/commit/5aea93d4c0490c242d7d7042a210611977848e0a) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Fixes an issue that prevented remove-subtask with comma separated tasks/subtasks from being deleted (only the first ID was being deleted). Closes #140

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`66ac9ab`](https://github.com/eyaltoledano/claude-task-master/commit/66ac9ab9f66d006da518d6e8a3244e708af2764d) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Improves next command to be subtask-aware - The logic for determining the "next task" (findNextTask function, used by task-master next and the next_task MCP tool) has been significantly improved. Previously, it only considered top-level tasks, making its recommendation less useful when a parent task containing subtasks was already marked 'in-progress'. - The updated logic now prioritizes finding the next available subtask within any 'in-progress' parent task, considering subtask dependencies and priority. - If no suitable subtask is found within active parent tasks, it falls back to recommending the next eligible top-level task based on the original criteria (status, dependencies, priority).

  This change makes the next command much more relevant and helpful during the implementation phase of complex tasks.

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`ca7b045`](https://github.com/eyaltoledano/claude-task-master/commit/ca7b0457f1dc65fd9484e92527d9fd6d69db758d) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Add `--status` flag to `show` command to filter displayed subtasks.

- [#328](https://github.com/eyaltoledano/claude-task-master/pull/328) [`5a2371b`](https://github.com/eyaltoledano/claude-task-master/commit/5a2371b7cc0c76f5e95d43921c1e8cc8081bf14e) Thanks [@knoxgraeme](https://github.com/knoxgraeme)! - Fix --task to --num-tasks in ui + related tests - issue #324

- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`6cb213e`](https://github.com/eyaltoledano/claude-task-master/commit/6cb213ebbd51116ae0688e35b575d09443d17c3b) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Adds a 'models' CLI and MCP command to get the current model configuration, available models, and gives the ability to set main/research/fallback models. - In the CLI, `task-master models` shows the current models config. Using the `--setup` flag launches an interactive setup that allows you to easily select the models you want to use for each of the three roles. Use `q` during the interactive setup to cancel the setup. - In the MCP, responses are simplified in RESTful format (instead of the full CLI output). The agent can use the `models` tool with different arguments, including `listAvailableModels` to get available models. Run without arguments, it will return the current configuration. Arguments are available to set the model for each of the three roles. This allows you to manage Taskmaster AI providers and models directly from either the CLI or MCP or both. - Updated the CLI help menu when you run `task-master` to include missing commands and .taskmasterconfig information. - Adds `--research` flag to `add-task` so you can hit up Perplexity right from the add-task flow, rather than having to add a task and then update it.

## 0.12.1

### Patch Changes
```
Default fallback model configuration

```diff
@@ -14,8 +14,8 @@
   },
   "fallback": {
     "provider": "anthropic",
-    "modelId": "claude-3.5-sonnet-20240620",
-    "maxTokens": 120000,
+    "modelId": "claude-3-5-sonnet-20240620",
+    "maxTokens": 8192,
     "temperature": 0.1
   }
 },
```

Documentation: environment variable reference (`LOG_LEVEL` renamed)

```diff
@@ -198,7 +198,7 @@ alwaysApply: true
 - **MAX_TOKENS** (Default: `"4000"`): Maximum tokens for responses (Example: `MAX_TOKENS=8000`)
 - **TEMPERATURE** (Default: `"0.7"`): Temperature for model responses (Example: `TEMPERATURE=0.5`)
 - **DEBUG** (Default: `"false"`): Enable debug logging (Example: `DEBUG=true`)
-- **LOG_LEVEL** (Default: `"info"`): Console output level (Example: `LOG_LEVEL=debug`)
+- **TASKMASTER_LOG_LEVEL** (Default: `"info"`): Console output level (Example: `TASKMASTER_LOG_LEVEL=debug`)
 - **DEFAULT_SUBTASKS** (Default: `"3"`): Default subtask count (Example: `DEFAULT_SUBTASKS=5`)
 - **DEFAULT_PRIORITY** (Default: `"medium"`): Default priority (Example: `DEFAULT_PRIORITY=high`)
 - **PROJECT_NAME** (Default: `"MCP SaaS MVP"`): Project name in metadata (Example: `PROJECT_NAME=My Awesome Project`)
```
Environment example: API key values now quoted

```diff
@@ -1,8 +1,8 @@
 # API Keys (Required to enable respective provider)
-ANTHROPIC_API_KEY=your_anthropic_api_key_here # Required: Format: sk-ant-api03-...
-PERPLEXITY_API_KEY=your_perplexity_api_key_here # Optional: Format: pplx-...
-OPENAI_API_KEY=your_openai_api_key_here # Optional, for OpenAI/OpenRouter models. Format: sk-proj-...
-GOOGLE_API_KEY=your_google_api_key_here # Optional, for Google Gemini models.
-MISTRAL_API_KEY=your_mistral_key_here # Optional, for Mistral AI models.
-XAI_API_KEY=YOUR_XAI_KEY_HERE # Optional, for xAI AI models.
-AZURE_OPENAI_API_KEY=your_azure_key_here # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
+ANTHROPIC_API_KEY="your_anthropic_api_key_here" # Required: Format: sk-ant-api03-...
+PERPLEXITY_API_KEY="your_perplexity_api_key_here" # Optional: Format: pplx-...
+OPENAI_API_KEY="your_openai_api_key_here" # Optional, for OpenAI/OpenRouter models. Format: sk-proj-...
+GOOGLE_API_KEY="your_google_api_key_here" # Optional, for Google Gemini models.
+MISTRAL_API_KEY="your_mistral_key_here" # Optional, for Mistral AI models.
+XAI_API_KEY="YOUR_XAI_KEY_HERE" # Optional, for xAI AI models.
+AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
```
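For CLI usage these keys go into a `.env` file in the project root; a minimal sketch, assuming only the required Anthropic key is needed:

```bash
# Append the required key to .env; quoted values match the updated example above.
echo 'ANTHROPIC_API_KEY="your_anthropic_api_key_here"' >> .env
```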
Documentation: configuration and logging (`LOG_LEVEL` renamed to `TASKMASTER_LOG_LEVEL`)

```diff
@@ -31,7 +31,7 @@ Task Master configuration is now managed through two primary methods:
 - Create a `.env` file in your project root for CLI usage.
 - See `assets/env.example` for required key names.

-**Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead.
+**Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `TASKMASTER_LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead.

 ## How It Works
```

````diff
@@ -42,7 +42,7 @@ Task Master configuration is now managed through two primary methods:
 - Tasks can have `subtasks` for more detailed implementation steps.
 - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.

 2. **CLI Commands**
    You can run the commands via:

 ```bash
````

```diff
@@ -200,7 +200,7 @@ Notes:
 ## Logging

-The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:
+The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable:

 - `debug`: Detailed information, typically useful for troubleshooting
 - `info`: Confirmation that things are working as expected (default)
```
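The renamed variable is read from the environment at run time; a one-line sketch (using `next` simply because it appears elsewhere in this diff — any command behaves the same):

```bash
# Enable verbose logging for a single invocation.
TASKMASTER_LOG_LEVEL=debug task-master next
```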
Documentation: `.taskmasterconfig` example gains a per-role `baseUrl`

```diff
@@ -15,13 +15,15 @@ Taskmaster uses two primary methods for configuration:
     "provider": "anthropic",
     "modelId": "claude-3-7-sonnet-20250219",
     "maxTokens": 64000,
-    "temperature": 0.2
+    "temperature": 0.2,
+    "baseUrl": "https://api.anthropic.com/v1"
   },
   "research": {
     "provider": "perplexity",
     "modelId": "sonar-pro",
     "maxTokens": 8700,
-    "temperature": 0.1
+    "temperature": 0.1,
+    "baseUrl": "https://api.perplexity.ai/v1"
   },
   "fallback": {
     "provider": "anthropic",
```
Documentation: optional endpoint overrides

```diff
@@ -56,8 +58,9 @@ Taskmaster uses two primary methods for configuration:
 - `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`).
 - `OPENROUTER_API_KEY`: Your OpenRouter API key.
 - `XAI_API_KEY`: Your X-AI API key.
-- **Optional Endpoint Overrides (in .taskmasterconfig):**
-  - `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key.
+- **Optional Endpoint Overrides:**
+  - **Per-role `baseUrl` in `.taskmasterconfig`:** You can add a `baseUrl` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
+  - `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseUrl` for the Azure model role).
   - `OLLAMA_BASE_URL`: Override the default Ollama API URL (Default: `http://localhost:11434/api`).

 **Important:** Settings like model ID selections (`main`, `research`, `fallback`), `maxTokens`, `temperature`, `logLevel`, `defaultSubtasks`, `defaultPriority`, and `projectName` are **managed in `.taskmasterconfig`**, not environment variables.
```
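One of the documented overrides as a shell sketch; the host shown is hypothetical, and you would only set this when Ollama is not at the documented default of `http://localhost:11434/api`:

```bash
# Point Taskmaster at an Ollama instance running on another machine.
export OLLAMA_BASE_URL="http://192.168.1.50:11434/api"
```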
MCP direct function: complexity report (caching removed)

```diff
@@ -8,7 +8,6 @@ import {
   enableSilentMode,
   disableSilentMode
 } from '../../../../scripts/modules/utils.js';
-import { getCachedOrExecute } from '../../tools/utils.js';

 /**
  * Direct function wrapper for displaying the complexity report with error handling and caching.
```

```diff
@@ -86,30 +85,20 @@ export async function complexityReportDirect(args, log) {

     // Use the caching utility
     try {
-      const result = await getCachedOrExecute({
-        cacheKey,
-        actionFn: coreActionFn,
-        log
-      });
-      log.info(
-        `complexityReportDirect completed. From cache: ${result.fromCache}`
-      );
-      return result; // Returns { success, data/error, fromCache }
+      const result = await coreActionFn();
+      log.info('complexityReportDirect completed');
+      return result;
     } catch (error) {
-      // Catch unexpected errors from getCachedOrExecute itself
       // Ensure silent mode is disabled
       disableSilentMode();
-      log.error(
-        `Unexpected error during getCachedOrExecute for complexityReport: ${error.message}`
-      );
+      log.error(`Unexpected error during complexityReport: ${error.message}`);
       return {
         success: false,
         error: {
           code: 'UNEXPECTED_ERROR',
           message: error.message
-        },
-        fromCache: false
+        }
       };
     }
   } catch (error) {
```
MCP direct function: list tasks (caching removed; complexity report threaded through)

```diff
@@ -4,7 +4,6 @@
  */

 import { listTasks } from '../../../../scripts/modules/task-manager.js';
-import { getCachedOrExecute } from '../../tools/utils.js';
 import {
   enableSilentMode,
   disableSilentMode
```

```diff
@@ -19,7 +18,7 @@ import {
  */
 export async function listTasksDirect(args, log) {
   // Destructure the explicit tasksJsonPath from args
-  const { tasksJsonPath, status, withSubtasks } = args;
+  const { tasksJsonPath, reportPath, status, withSubtasks } = args;

   if (!tasksJsonPath) {
     log.error('listTasksDirect called without tasksJsonPath');
```

```diff
@@ -36,7 +35,6 @@ export async function listTasksDirect(args, log) {
   // Use the explicit tasksJsonPath for cache key
   const statusFilter = status || 'all';
   const withSubtasksFilter = withSubtasks || false;
-  const cacheKey = `listTasks:${tasksJsonPath}:${statusFilter}:${withSubtasksFilter}`;

   // Define the action function to be executed on cache miss
   const coreListTasksAction = async () => {
```

```diff
@@ -51,6 +49,7 @@ export async function listTasksDirect(args, log) {
       const resultData = listTasks(
         tasksJsonPath,
         statusFilter,
+        reportPath,
         withSubtasksFilter,
         'json'
       );
```

```diff
@@ -65,6 +64,7 @@ export async function listTasksDirect(args, log) {
       }
     };
   }

   log.info(
     `Core listTasks function retrieved ${resultData.tasks.length} tasks`
   );
```

```diff
@@ -88,25 +88,19 @@ export async function listTasksDirect(args, log) {
     }
   };

-  // Use the caching utility
   try {
-    const result = await getCachedOrExecute({
-      cacheKey,
-      actionFn: coreListTasksAction,
-      log
-    });
-    log.info(`listTasksDirect completed. From cache: ${result.fromCache}`);
-    return result; // Returns { success, data/error, fromCache }
+    const result = await coreListTasksAction();
+    log.info('listTasksDirect completed');
+    return result;
   } catch (error) {
-    // Catch unexpected errors from getCachedOrExecute itself (though unlikely)
-    log.error(
-      `Unexpected error during getCachedOrExecute for listTasks: ${error.message}`
-    );
+    log.error(`Unexpected error during listTasks: ${error.message}`);
     console.error(error.stack);
     return {
       success: false,
-      error: { code: 'CACHE_UTIL_ERROR', message: error.message },
-      fromCache: false
+      error: {
+        code: 'UNEXPECTED_ERROR',
+        message: error.message
+      }
     };
   }
 }
```
MCP direct function: next task (caching removed; complexity report passed to `findNextTask`)

```diff
@@ -4,8 +4,10 @@
  */

 import { findNextTask } from '../../../../scripts/modules/task-manager.js';
-import { readJSON } from '../../../../scripts/modules/utils.js';
-import { getCachedOrExecute } from '../../tools/utils.js';
+import {
+  readJSON,
+  readComplexityReport
+} from '../../../../scripts/modules/utils.js';
 import {
   enableSilentMode,
   disableSilentMode
```

```diff
@@ -21,7 +23,7 @@ import {
  */
 export async function nextTaskDirect(args, log) {
   // Destructure expected args
-  const { tasksJsonPath } = args;
+  const { tasksJsonPath, reportPath } = args;

   if (!tasksJsonPath) {
     log.error('nextTaskDirect called without tasksJsonPath');
```

```diff
@@ -35,9 +37,6 @@ export async function nextTaskDirect(args, log) {
     };
   }

-  // Generate cache key using the provided task path
-  const cacheKey = `nextTask:${tasksJsonPath}`;
-
   // Define the action function to be executed on cache miss
   const coreNextTaskAction = async () => {
     try {
```

```diff
@@ -59,8 +58,11 @@ export async function nextTaskDirect(args, log) {
       };
     }

+    // Read the complexity report
+    const complexityReport = readComplexityReport(reportPath);
+
     // Find the next task
-    const nextTask = findNextTask(data.tasks);
+    const nextTask = findNextTask(data.tasks, complexityReport);

     if (!nextTask) {
       log.info(
```

```diff
@@ -118,18 +120,11 @@ export async function nextTaskDirect(args, log) {

   // Use the caching utility
   try {
-    const result = await getCachedOrExecute({
-      cacheKey,
-      actionFn: coreNextTaskAction,
-      log
-    });
-    log.info(`nextTaskDirect completed. From cache: ${result.fromCache}`);
-    return result; // Returns { success, data/error, fromCache }
+    const result = await coreNextTaskAction();
+    log.info(`nextTaskDirect completed.`);
+    return result;
   } catch (error) {
-    // Catch unexpected errors from getCachedOrExecute itself
-    log.error(
-      `Unexpected error during getCachedOrExecute for nextTask: ${error.message}`
-    );
+    log.error(`Unexpected error during nextTask: ${error.message}`);
     return {
       success: false,
       error: {
```
MCP direct function: show task (complexity report passed to `findTaskById`)

```diff
@@ -3,11 +3,10 @@
  * Direct function implementation for showing task details
  */

-import { findTaskById, readJSON } from '../../../../scripts/modules/utils.js';
-import { getCachedOrExecute } from '../../tools/utils.js';
 import {
-  enableSilentMode,
-  disableSilentMode
+  findTaskById,
+  readComplexityReport,
+  readJSON
 } from '../../../../scripts/modules/utils.js';
 import { findTasksJsonPath } from '../utils/path-utils.js';
```

```diff
@@ -17,6 +16,7 @@ import { findTasksJsonPath } from '../utils/path-utils.js';
 * @param {Object} args - Command arguments.
 * @param {string} args.id - Task ID to show.
 * @param {string} [args.file] - Optional path to the tasks file (passed to findTasksJsonPath).
+ * @param {string} args.reportPath - Explicit path to the complexity report file.
 * @param {string} [args.status] - Optional status to filter subtasks by.
 * @param {string} args.projectRoot - Absolute path to the project root directory (already normalized by tool).
 * @param {Object} log - Logger object.
```

```diff
@@ -27,7 +27,7 @@ export async function showTaskDirect(args, log) {
   // Destructure session from context if needed later, otherwise ignore
   // const { session } = context;
   // Destructure projectRoot and other args. projectRoot is assumed normalized.
-  const { id, file, status, projectRoot } = args;
+  const { id, file, reportPath, status, projectRoot } = args;

   log.info(
     `Showing task direct function. ID: ${id}, File: ${file}, Status Filter: ${status}, ProjectRoot: ${projectRoot}`
```

```diff
@@ -64,9 +64,12 @@ export async function showTaskDirect(args, log) {
     };
   }

+  const complexityReport = readComplexityReport(reportPath);
+
   const { task, originalSubtaskCount } = findTaskById(
     tasksData.tasks,
     id,
+    complexityReport,
     status
   );
```
MCP direct function: update tasks (silent-mode helpers imported)

```diff
@@ -6,6 +6,10 @@
 import path from 'path';
 import { updateTasks } from '../../../../scripts/modules/task-manager.js';
 import { createLogWrapper } from '../../tools/utils.js';
+import {
+  enableSilentMode,
+  disableSilentMode
+} from '../../../../scripts/modules/utils.js';

 /**
  * Direct function wrapper for updating tasks based on new context.
```
Path utilities: new `findComplexityReportPath` helper

```diff
@@ -339,6 +339,49 @@ export function findPRDDocumentPath(projectRoot, explicitPath, log) {
   return null;
 }

+export function findComplexityReportPath(projectRoot, explicitPath, log) {
+  // If explicit path is provided, check if it exists
+  if (explicitPath) {
+    const fullPath = path.isAbsolute(explicitPath)
+      ? explicitPath
+      : path.resolve(projectRoot, explicitPath);
+
+    if (fs.existsSync(fullPath)) {
+      log.info(`Using provided PRD document path: ${fullPath}`);
+      return fullPath;
+    } else {
+      log.warn(
+        `Provided PRD document path not found: ${fullPath}, will search for alternatives`
+      );
+    }
+  }
+
+  // Common locations and file patterns for PRD documents
+  const commonLocations = [
+    '', // Project root
+    'scripts/'
+  ];
+
+  const commonFileNames = [
+    'complexity-report.json',
+    'task-complexity-report.json'
+  ];
+
+  // Check all possible combinations
+  for (const location of commonLocations) {
+    for (const fileName of commonFileNames) {
+      const potentialPath = path.join(projectRoot, location, fileName);
+      if (fs.existsSync(potentialPath)) {
+        log.info(`Found PRD document at: ${potentialPath}`);
+        return potentialPath;
+      }
+    }
+  }
+
+  log.warn(`No PRD document found in common locations within ${projectRoot}`);
+  return null;
+}
+
 /**
  * Resolves the tasks output directory path
  * @param {string} projectRoot - The project root directory
```
@@ -10,7 +10,10 @@ import {
|
|||||||
withNormalizedProjectRoot
|
withNormalizedProjectRoot
|
||||||
} from './utils.js';
|
} from './utils.js';
|
||||||
import { showTaskDirect } from '../core/task-master-core.js';
|
import { showTaskDirect } from '../core/task-master-core.js';
|
||||||
import { findTasksJsonPath } from '../core/utils/path-utils.js';
|
import {
|
||||||
|
findTasksJsonPath,
|
||||||
|
findComplexityReportPath
|
||||||
|
} from '../core/utils/path-utils.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Custom processor function that removes allTasks from the response
|
* Custom processor function that removes allTasks from the response
|
||||||
@@ -50,6 +53,12 @@ export function registerShowTaskTool(server) {
|
|||||||
.string()
|
.string()
|
||||||
.optional()
|
.optional()
|
||||||
.describe('Path to the tasks file relative to project root'),
|
.describe('Path to the tasks file relative to project root'),
|
||||||
|
complexityReport: z
|
||||||
|
.string()
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
'Path to the complexity report file (relative to project root or absolute)'
|
||||||
|
),
|
||||||
projectRoot: z
|
projectRoot: z
|
||||||
.string()
|
.string()
|
||||||
.optional()
|
.optional()
|
||||||
@@ -81,9 +90,22 @@ export function registerShowTaskTool(server) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Call the direct function, passing the normalized projectRoot
|
// Call the direct function, passing the normalized projectRoot
|
||||||
|
// Resolve the path to complexity report
|
||||||
|
let complexityReportPath;
|
||||||
|
try {
|
||||||
|
complexityReportPath = findComplexityReportPath(
|
||||||
|
projectRoot,
|
||||||
|
args.complexityReport,
|
||||||
|
log
|
||||||
|
);
|
||||||
|
} catch (error) {
|
||||||
|
log.error(`Error finding complexity report: ${error.message}`);
|
||||||
|
}
|
||||||
const result = await showTaskDirect(
|
const result = await showTaskDirect(
|
||||||
{
|
{
|
||||||
tasksJsonPath: tasksJsonPath,
|
tasksJsonPath: tasksJsonPath,
|
||||||
|
reportPath: complexityReportPath,
|
||||||
|
// Pass other relevant args
|
||||||
id: id,
|
id: id,
|
||||||
status: status,
|
status: status,
|
||||||
projectRoot: projectRoot
|
projectRoot: projectRoot
|
||||||
@@ -10,7 +10,10 @@ import {
 	withNormalizedProjectRoot
 } from './utils.js';
 import { listTasksDirect } from '../core/task-master-core.js';
-import { findTasksJsonPath } from '../core/utils/path-utils.js';
+import {
+	findTasksJsonPath,
+	findComplexityReportPath
+} from '../core/utils/path-utils.js';

 /**
  * Register the getTasks tool with the MCP server
@@ -38,6 +41,12 @@ export function registerListTasksTool(server) {
 				.describe(
 					'Path to the tasks file (relative to project root or absolute)'
 				),
+			complexityReport: z
+				.string()
+				.optional()
+				.describe(
+					'Path to the complexity report file (relative to project root or absolute)'
+				),
 			projectRoot: z
 				.string()
 				.describe('The directory of the project. Must be an absolute path.')
@@ -60,11 +69,23 @@ export function registerListTasksTool(server) {
 				);
 			}

+			// Resolve the path to complexity report
+			let complexityReportPath;
+			try {
+				complexityReportPath = findComplexityReportPath(
+					args.projectRoot,
+					args.complexityReport,
+					log
+				);
+			} catch (error) {
+				log.error(`Error finding complexity report: ${error.message}`);
+			}
 			const result = await listTasksDirect(
 				{
 					tasksJsonPath: tasksJsonPath,
 					status: args.status,
-					withSubtasks: args.withSubtasks
+					withSubtasks: args.withSubtasks,
+					reportPath: complexityReportPath
 				},
 				log
 			);
@@ -10,7 +10,10 @@ import {
 	withNormalizedProjectRoot
 } from './utils.js';
 import { nextTaskDirect } from '../core/task-master-core.js';
-import { findTasksJsonPath } from '../core/utils/path-utils.js';
+import {
+	findTasksJsonPath,
+	findComplexityReportPath
+} from '../core/utils/path-utils.js';

 /**
  * Register the next-task tool with the MCP server
@@ -23,6 +26,12 @@ export function registerNextTaskTool(server) {
 			'Find the next task to work on based on dependencies and status',
 		parameters: z.object({
 			file: z.string().optional().describe('Absolute path to the tasks file'),
+			complexityReport: z
+				.string()
+				.optional()
+				.describe(
+					'Path to the complexity report file (relative to project root or absolute)'
+				),
 			projectRoot: z
 				.string()
 				.describe('The directory of the project. Must be an absolute path.')
@@ -45,9 +54,21 @@ export function registerNextTaskTool(server) {
 				);
 			}

+			// Resolve the path to complexity report
+			let complexityReportPath;
+			try {
+				complexityReportPath = findComplexityReportPath(
+					args.projectRoot,
+					args.complexityReport,
+					log
+				);
+			} catch (error) {
+				log.error(`Error finding complexity report: ${error.message}`);
+			}
 			const result = await nextTaskDirect(
 				{
-					tasksJsonPath: tasksJsonPath
+					tasksJsonPath: tasksJsonPath,
+					reportPath: complexityReportPath
 				},
 				log
 			);
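The tool registrations above all converge on the same optional `complexityReport` parameter. A sketch of the argument object such a tool receives, using the parameter names from the `z.object` schemas; the concrete values are illustrative only:

```javascript
// Hypothetical argument payload for the updated next-task tool.
// Only projectRoot is required; complexityReport is optional and, if omitted,
// the server falls back to findComplexityReportPath()'s default locations.
const nextTaskArgs = {
	file: '/abs/project/tasks/tasks.json', // optional, absolute path to tasks file
	complexityReport: 'scripts/task-complexity-report.json', // optional, relative or absolute
	projectRoot: '/abs/project' // required, absolute path
};
```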
@@ -11,6 +11,7 @@ import {
 } from './utils.js';
 import { setTaskStatusDirect } from '../core/task-master-core.js';
 import { findTasksJsonPath } from '../core/utils/path-utils.js';
+import { TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js';

 /**
  * Register the setTaskStatus tool with the MCP server
@@ -27,7 +28,7 @@ export function registerSetTaskStatusTool(server) {
 					"Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated to update multiple tasks/subtasks at once."
 				),
 			status: z
-				.string()
+				.enum(TASK_STATUS_OPTIONS)
 				.describe(
 					"New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'."
 				),
34 package-lock.json generated
@@ -1,12 +1,12 @@
 {
 	"name": "task-master-ai",
-	"version": "0.12.1",
+	"version": "0.13.2",
 	"lockfileVersion": 3,
 	"requires": true,
 	"packages": {
 		"": {
 			"name": "task-master-ai",
-			"version": "0.12.1",
+			"version": "0.13.2",
 			"license": "MIT WITH Commons-Clause",
 			"dependencies": {
 				"@ai-sdk/anthropic": "^1.2.10",
@@ -19,6 +19,9 @@
 				"@anthropic-ai/sdk": "^0.39.0",
 				"@openrouter/ai-sdk-provider": "^0.4.5",
 				"ai": "^4.3.10",
+				"boxen": "^8.0.1",
+				"chalk": "^5.4.1",
+				"cli-table3": "^0.6.5",
 				"commander": "^11.1.0",
 				"cors": "^2.8.5",
 				"dotenv": "^16.3.1",
@@ -34,7 +37,8 @@
 				"ollama-ai-provider": "^1.2.0",
 				"openai": "^4.89.0",
 				"ora": "^8.2.0",
-				"uuid": "^11.1.0"
+				"uuid": "^11.1.0",
+				"zod": "^3.23.8"
 			},
 			"bin": {
 				"task-master": "bin/task-master.js",
@@ -45,9 +49,6 @@
 				"@changesets/changelog-github": "^0.5.1",
 				"@changesets/cli": "^2.28.1",
 				"@types/jest": "^29.5.14",
-				"boxen": "^8.0.1",
-				"chalk": "^5.4.1",
-				"cli-table3": "^0.6.5",
 				"execa": "^8.0.1",
 				"ink": "^5.0.1",
 				"jest": "^29.7.0",
@@ -57,8 +58,7 @@
 				"prettier": "^3.5.3",
 				"react": "^18.3.1",
 				"supertest": "^7.1.0",
-				"tsx": "^4.16.2",
-				"zod": "^3.23.8"
+				"tsx": "^4.16.2"
 			},
 			"engines": {
 				"node": ">=14.0.0"
@@ -1238,7 +1238,6 @@
 			"version": "1.5.0",
 			"resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz",
 			"integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==",
-			"dev": true,
 			"license": "MIT",
 			"optional": true,
 			"engines": {
@@ -3307,7 +3306,6 @@
 			"version": "3.0.1",
 			"resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz",
 			"integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==",
-			"dev": true,
 			"license": "ISC",
 			"dependencies": {
 				"string-width": "^4.1.0"
@@ -3317,7 +3315,6 @@
 			"version": "5.0.1",
 			"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
 			"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
-			"dev": true,
 			"license": "MIT",
 			"engines": {
 				"node": ">=8"
@@ -3327,14 +3324,12 @@
 			"version": "8.0.0",
 			"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
 			"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
-			"dev": true,
 			"license": "MIT"
 		},
 		"node_modules/ansi-align/node_modules/string-width": {
 			"version": "4.2.3",
 			"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
 			"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
-			"dev": true,
 			"license": "MIT",
 			"dependencies": {
 				"emoji-regex": "^8.0.0",
@@ -3349,7 +3344,6 @@
 			"version": "6.0.1",
 			"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
 			"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
-			"dev": true,
 			"license": "MIT",
 			"dependencies": {
 				"ansi-regex": "^5.0.1"
@@ -3699,7 +3693,6 @@
 			"version": "8.0.1",
 			"resolved": "https://registry.npmjs.org/boxen/-/boxen-8.0.1.tgz",
 			"integrity": "sha512-F3PH5k5juxom4xktynS7MoFY+NUWH5LC4CnH11YB8NPew+HLpmBLCybSAEyb2F+4pRXhuhWqFesoQd6DAyc2hw==",
-			"dev": true,
 			"license": "MIT",
 			"dependencies": {
 				"ansi-align": "^3.0.1",
@@ -3850,7 +3843,6 @@
 			"version": "8.0.0",
 			"resolved": "https://registry.npmjs.org/camelcase/-/camelcase-8.0.0.tgz",
 			"integrity": "sha512-8WB3Jcas3swSvjIeA2yvCJ+Miyz5l1ZmB6HFb9R1317dt9LCQoswg/BGrmAmkWVEszSrrg4RwmO46qIm2OEnSA==",
-			"dev": true,
 			"license": "MIT",
 			"engines": {
 				"node": ">=16"
@@ -3935,7 +3927,6 @@
 			"version": "3.0.0",
 			"resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz",
 			"integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==",
-			"dev": true,
 			"license": "MIT",
 			"engines": {
 				"node": ">=10"
@@ -3975,7 +3966,6 @@
 			"version": "0.6.5",
 			"resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz",
 			"integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==",
-			"dev": true,
 			"license": "MIT",
 			"dependencies": {
 				"string-width": "^4.2.0"
@@ -3991,7 +3981,6 @@
 			"version": "5.0.1",
 			"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
 			"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
-			"dev": true,
 			"license": "MIT",
 			"engines": {
 				"node": ">=8"
@@ -4001,14 +3990,12 @@
 			"version": "8.0.0",
 			"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
 			"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
-			"dev": true,
 			"license": "MIT"
 		},
 		"node_modules/cli-table3/node_modules/string-width": {
 			"version": "4.2.3",
 			"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
 			"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
-			"dev": true,
 			"license": "MIT",
 			"dependencies": {
 				"emoji-regex": "^8.0.0",
@@ -4023,7 +4010,6 @@
 			"version": "6.0.1",
 			"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
 			"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
-			"dev": true,
 			"license": "MIT",
 			"dependencies": {
 				"ansi-regex": "^5.0.1"
@@ -9488,7 +9474,6 @@
 			"version": "4.37.0",
 			"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.37.0.tgz",
 			"integrity": "sha512-S/5/0kFftkq27FPNye0XM1e2NsnoD/3FS+pBmbjmmtLT6I+i344KoOf7pvXreaFsDamWeaJX55nczA1m5PsBDg==",
-			"dev": true,
 			"license": "(MIT OR CC0-1.0)",
 			"engines": {
 				"node": ">=16"
@@ -9698,7 +9683,6 @@
 			"version": "5.0.0",
 			"resolved": "https://registry.npmjs.org/widest-line/-/widest-line-5.0.0.tgz",
 			"integrity": "sha512-c9bZp7b5YtRj2wOe6dlj32MK+Bx/M/d+9VB2SHM1OtsUHR0aV0tdP6DWh/iMt0kWi1t5g1Iudu6hQRNd1A4PVA==",
-			"dev": true,
 			"license": "MIT",
 			"dependencies": {
 				"string-width": "^7.0.0"
@@ -9714,7 +9698,6 @@
 			"version": "9.0.0",
 			"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.0.tgz",
 			"integrity": "sha512-G8ura3S+3Z2G+mkgNRq8dqaFZAuxfsxpBB8OCTGRTCtp+l/v9nbFNmCUP1BZMts3G1142MsZfn6eeUKrr4PD1Q==",
-			"dev": true,
 			"license": "MIT",
 			"dependencies": {
 				"ansi-styles": "^6.2.1",
@@ -9732,7 +9715,6 @@
 			"version": "6.2.1",
 			"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
 			"integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
-			"dev": true,
 			"license": "MIT",
 			"engines": {
 				"node": ">=12"
21 package.json
@@ -1,6 +1,6 @@
 {
 	"name": "task-master-ai",
-	"version": "0.12.1",
+	"version": "0.13.2",
 	"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
 	"main": "index.js",
 	"type": "module",
@@ -64,7 +64,11 @@
 		"ollama-ai-provider": "^1.2.0",
 		"openai": "^4.89.0",
 		"ora": "^8.2.0",
-		"uuid": "^11.1.0"
+		"uuid": "^11.1.0",
+		"boxen": "^8.0.1",
+		"chalk": "^5.4.1",
+		"cli-table3": "^0.6.5",
+		"zod": "^3.23.8"
 	},
 	"engines": {
 		"node": ">=14.0.0"
@@ -78,15 +82,14 @@
 		"url": "https://github.com/eyaltoledano/claude-task-master/issues"
 	},
 	"files": [
-		"scripts/init.js",
-		"scripts/dev.js",
-		"scripts/modules/**",
+		"scripts/**",
 		"assets/**",
 		".cursor/**",
 		"README-task-master.md",
 		"index.js",
 		"bin/**",
-		"mcp-server/**"
+		"mcp-server/**",
+		"src/**"
 	],
 	"overrides": {
 		"node-fetch": "^3.3.2",
@@ -96,9 +99,6 @@
 		"@changesets/changelog-github": "^0.5.1",
 		"@changesets/cli": "^2.28.1",
 		"@types/jest": "^29.5.14",
-		"boxen": "^8.0.1",
-		"chalk": "^5.4.1",
-		"cli-table3": "^0.6.5",
 		"execa": "^8.0.1",
 		"ink": "^5.0.1",
 		"jest": "^29.7.0",
@@ -108,7 +108,6 @@
 		"prettier": "^3.5.3",
 		"react": "^18.3.1",
 		"supertest": "^7.1.0",
-		"tsx": "^4.16.2",
-		"zod": "^3.23.8"
+		"tsx": "^4.16.2"
 	}
 }
@@ -32,7 +32,7 @@ The script can be configured through environment variables in a `.env` file at t
 - `PERPLEXITY_API_KEY`: Your Perplexity API key for research-backed subtask generation
 - `PERPLEXITY_MODEL`: Specify which Perplexity model to use (default: "sonar-medium-online")
 - `DEBUG`: Enable debug logging (default: false)
-- `LOG_LEVEL`: Log level - debug, info, warn, error (default: info)
+- `TASKMASTER_LOG_LEVEL`: Log level - debug, info, warn, error (default: info)
 - `DEFAULT_SUBTASKS`: Default number of subtasks when expanding (default: 3)
 - `DEFAULT_PRIORITY`: Default priority for generated tasks (default: medium)
 - `PROJECT_NAME`: Override default project name in tasks.json
@@ -47,7 +47,7 @@ The script can be configured through environment variables in a `.env` file at t
 - Tasks can have `subtasks` for more detailed implementation steps.
 - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress.

 2. **Script Commands**
    You can run the script via:

    ```bash
@@ -225,7 +225,7 @@ To use the Perplexity integration:

 ## Logging

-The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:
+The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable:

 - `debug`: Detailed information, typically useful for troubleshooting
 - `info`: Confirmation that things are working as expected (default)
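Note the rename: the logging level is now read from `TASKMASTER_LOG_LEVEL` rather than `LOG_LEVEL` (see the code change in the next hunk), so an existing `.env` entry such as `LOG_LEVEL=debug` presumably needs to become `TASKMASTER_LOG_LEVEL=debug` to keep producing debug output.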
@@ -38,10 +38,10 @@ const LOG_LEVELS = {
 	success: 4
 };

-// Get log level from environment or default to info
-const LOG_LEVEL = process.env.LOG_LEVEL
-	? LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()]
-	: LOG_LEVELS.info;
+// Determine log level from environment variable or default to 'info'
+const LOG_LEVEL = process.env.TASKMASTER_LOG_LEVEL
+	? LOG_LEVELS[process.env.TASKMASTER_LOG_LEVEL.toLowerCase()]
+	: LOG_LEVELS.info; // Default to info

 // Create a color gradient for the banner
 const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);
@@ -14,7 +14,8 @@ import {
 	getResearchModelId,
 	getFallbackProvider,
 	getFallbackModelId,
-	getParametersForRole
+	getParametersForRole,
+	getBaseUrlForRole
 } from './config-manager.js';
 import { log, resolveEnvVariable, findProjectRoot } from './utils.js';

@@ -284,7 +285,13 @@ async function _unifiedServiceRunner(serviceType, params) {
 		'AI service call failed for all configured roles.';

 	for (const currentRole of sequence) {
-		let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn;
+		let providerName,
+			modelId,
+			apiKey,
+			roleParams,
+			providerFnSet,
+			providerApiFn,
+			baseUrl;

 		try {
 			log('info', `New AI service call with role: ${currentRole}`);
@@ -325,6 +332,7 @@ async function _unifiedServiceRunner(serviceType, params) {

 			// Pass effectiveProjectRoot to getParametersForRole
 			roleParams = getParametersForRole(currentRole, effectiveProjectRoot);
+			baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot);

 			// 2. Get Provider Function Set
 			providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()];
@@ -401,6 +409,7 @@ async function _unifiedServiceRunner(serviceType, params) {
 				maxTokens: roleParams.maxTokens,
 				temperature: roleParams.temperature,
 				messages,
+				baseUrl,
 				...(serviceType === 'generateObject' && { schema, objectName }),
 				...restApiParams
 			};
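With `baseUrl` now included in the unified call params, each provider function can honor a custom endpoint. The provider modules themselves are not part of this diff, so the following is only a hedged sketch of how such a module might translate the new parameter into SDK client options; the option name (`baseURL` vs `baseUrl`) varies by SDK and is an assumption here:

```javascript
// Hedged sketch, not the project's actual provider code.
function buildClientOptions({ apiKey, baseUrl }) {
	return {
		apiKey,
		// Only override the endpoint when a custom baseUrl was configured for the role.
		...(baseUrl ? { baseURL: baseUrl } : {})
	};
}

// Example: a role pointed at an OpenAI-compatible proxy endpoint.
console.log(buildClientOptions({ apiKey: 'sk-test', baseUrl: 'http://localhost:8000/v1' }));
// -> { apiKey: 'sk-test', baseURL: 'http://localhost:8000/v1' }
```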
@@ -73,7 +73,11 @@ import {
 	getApiKeyStatusReport
 } from './task-manager/models.js';
 import { findProjectRoot } from './utils.js';
+import {
+	isValidTaskStatus,
+	TASK_STATUS_OPTIONS
+} from '../../src/constants/task-status.js';
+import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
 /**
  * Runs the interactive setup process for model configuration.
  * @param {string|null} projectRoot - The resolved project root directory.
@@ -486,11 +490,6 @@ function registerCommands(programInstance) {
 				process.exit(1);
 			});

-	// Default help
-	programInstance.on('--help', function () {
-		displayHelp();
-	});
-
 	// parse-prd command
 	programInstance
 		.command('parse-prd')
@@ -515,7 +514,7 @@ function registerCommands(programInstance) {
 			const outputPath = options.output;
 			const force = options.force || false;
 			const append = options.append || false;
-			let useForce = false;
+			let useForce = force;
 			let useAppend = false;

 			// Helper function to check if tasks.json exists and confirm overwrite
@@ -609,7 +608,7 @@ function registerCommands(programInstance) {
 				spinner = ora('Parsing PRD and generating tasks...').start();
 				await parsePRD(inputFile, outputPath, numTasks, {
 					append: useAppend,
-					force: useForce
+					useForce
 				});
 				spinner.succeed('Tasks generated successfully!');
 			} catch (error) {
@@ -1038,7 +1037,7 @@ function registerCommands(programInstance) {
 		)
 		.option(
 			'-s, --status <status>',
-			'New status (todo, in-progress, review, done)'
+			`New status (one of: ${TASK_STATUS_OPTIONS.join(', ')})`
 		)
 		.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
 		.action(async (options) => {
@@ -1051,6 +1050,16 @@ function registerCommands(programInstance) {
 				process.exit(1);
 			}

+			if (!isValidTaskStatus(status)) {
+				console.error(
+					chalk.red(
+						`Error: Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
+					)
+				);
+
+				process.exit(1);
+			}
+
 			console.log(
 				chalk.blue(`Setting status of task(s) ${taskId} to: ${status}`)
 			);
@@ -1063,10 +1072,16 @@ function registerCommands(programInstance) {
 		.command('list')
 		.description('List all tasks')
 		.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
+		.option(
+			'-r, --report <report>',
+			'Path to the complexity report file',
+			'scripts/task-complexity-report.json'
+		)
 		.option('-s, --status <status>', 'Filter by status')
 		.option('--with-subtasks', 'Show subtasks for each task')
 		.action(async (options) => {
 			const tasksPath = options.file;
+			const reportPath = options.report;
 			const statusFilter = options.status;
 			const withSubtasks = options.withSubtasks || false;

@@ -1078,7 +1093,7 @@ function registerCommands(programInstance) {
 				console.log(chalk.blue('Including subtasks in listing'));
 			}

-			await listTasks(tasksPath, statusFilter, withSubtasks);
+			await listTasks(tasksPath, statusFilter, reportPath, withSubtasks);
 		});

 	// expand command
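In practice the new flag means the complexity column can be populated from the CLI as well, e.g. `task-master list --with-subtasks -r scripts/task-complexity-report.json` (or simply `task-master list`, since `-r` defaults to `scripts/task-complexity-report.json`); the same `-r, --report` option is added to the `next` and `show` commands in the hunks below.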
@@ -1278,10 +1293,6 @@ function registerCommands(programInstance) {
 			'--details <details>',
 			'Implementation details (for manual task creation)'
 		)
-		.option(
-			'--test-strategy <testStrategy>',
-			'Test strategy (for manual task creation)'
-		)
 		.option(
 			'--dependencies <dependencies>',
 			'Comma-separated list of task IDs this task depends on'
@@ -1388,9 +1399,15 @@ function registerCommands(programInstance) {
 			`Show the next task to work on based on dependencies and status${chalk.reset('')}`
 		)
 		.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
+		.option(
+			'-r, --report <report>',
+			'Path to the complexity report file',
+			'scripts/task-complexity-report.json'
+		)
 		.action(async (options) => {
 			const tasksPath = options.file;
-			await displayNextTask(tasksPath);
+			const reportPath = options.report;
+			await displayNextTask(tasksPath, reportPath);
 		});

 	// show command
@@ -1403,6 +1420,11 @@ function registerCommands(programInstance) {
 		.option('-i, --id <id>', 'Task ID to show')
 		.option('-s, --status <status>', 'Filter subtasks by status') // ADDED status option
 		.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
+		.option(
+			'-r, --report <report>',
+			'Path to the complexity report file',
+			'scripts/task-complexity-report.json'
+		)
 		.action(async (taskId, options) => {
 			const idArg = taskId || options.id;
 			const statusFilter = options.status; // ADDED: Capture status filter
@@ -1413,8 +1435,9 @@ function registerCommands(programInstance) {
 			}

 			const tasksPath = options.file;
+			const reportPath = options.report;
 			// PASS statusFilter to the display function
-			await displayTaskById(tasksPath, idArg, statusFilter);
+			await displayTaskById(tasksPath, idArg, reportPath, statusFilter);
 		});

 	// add-dependency command
@@ -1663,6 +1686,7 @@ function registerCommands(programInstance) {
 			}
 		} catch (error) {
 			console.error(chalk.red(`Error: ${error.message}`));
+			showAddSubtaskHelp();
 			process.exit(1);
 		}
 	})
@@ -2366,14 +2390,7 @@ function setupCLI() {
 			return 'unknown'; // Default fallback if package.json fails
 		})
 		.helpOption('-h, --help', 'Display help')
-		.addHelpCommand(false) // Disable default help command
-		.on('--help', () => {
-			displayHelp(); // Use your custom help display instead
-		})
-		.on('-h', () => {
-			displayHelp();
-			process.exit(0);
-		});
+		.addHelpCommand(false); // Disable default help command

 	// Modify the help option to use your custom display
 	programInstance.helpInformation = () => {
@@ -2393,28 +2410,7 @@ function setupCLI() {
 	 */
 	async function checkForUpdate() {
 		// Get current version from package.json ONLY
-		let currentVersion = 'unknown'; // Initialize with a default
-		try {
-			// Try to get the version from the installed package (if applicable) or current dir
-			let packageJsonPath = path.join(
-				process.cwd(),
-				'node_modules',
-				'task-master-ai',
-				'package.json'
-			);
-			// Fallback to current directory package.json if not found in node_modules
-			if (!fs.existsSync(packageJsonPath)) {
-				packageJsonPath = path.join(process.cwd(), 'package.json');
-			}
-
-			if (fs.existsSync(packageJsonPath)) {
-				const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
-				currentVersion = packageJson.version;
-			}
-		} catch (error) {
-			// Silently fail and use default
-			log('debug', `Error reading current package version: ${error.message}`);
-		}
+		const currentVersion = getTaskMasterVersion();

 	return new Promise((resolve) => {
 		// Get the latest version from npm registry
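`getTaskMasterVersion` replaces the inline package.json probing removed above. The new `src/utils/getVersion.js` module itself isn't shown in this compare view, so the following is only a hedged sketch of what such a helper might look like, resolving `package.json` relative to the module rather than the caller's working directory:

```javascript
// Hypothetical sketch of src/utils/getVersion.js (the actual implementation is not shown here).
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';

export function getTaskMasterVersion() {
	try {
		// Resolve package.json relative to this file instead of process.cwd().
		const here = path.dirname(fileURLToPath(import.meta.url));
		const packageJsonPath = path.join(here, '..', '..', 'package.json');
		const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
		return packageJson.version ?? 'unknown';
	} catch {
		return 'unknown';
	}
}
```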
@@ -677,6 +677,13 @@ function getAllProviders() {
 	return Object.keys(MODEL_MAP || {});
 }

+function getBaseUrlForRole(role, explicitRoot = null) {
+	const roleConfig = getModelConfigForRole(role, explicitRoot);
+	return roleConfig && typeof roleConfig.baseUrl === 'string'
+		? roleConfig.baseUrl
+		: undefined;
+}
+
 export {
 	// Core config access
 	getConfig,
@@ -704,6 +711,7 @@ export {
 	getFallbackModelId,
 	getFallbackMaxTokens,
 	getFallbackTemperature,
+	getBaseUrlForRole,

 	// Global setting getters (No env var overrides)
 	getLogLevel,
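`getBaseUrlForRole` simply surfaces an optional `baseUrl` string from the per-role model configuration, which `_unifiedServiceRunner` then forwards with the provider call. A hedged usage sketch; the config entry shown in the comment is hypothetical (this diff only guarantees that the role's config object may carry a `baseUrl` string):

```javascript
import { getBaseUrlForRole } from './scripts/modules/config-manager.js';

// Hypothetical per-role config entry in the project's Taskmaster config, e.g.:
// "models": { "main": { "provider": "ollama", "modelId": "llama3", "baseUrl": "http://localhost:11434/api" } }

const baseUrl = getBaseUrlForRole('main', '/abs/path/to/project');
// -> 'http://localhost:11434/api' when configured, otherwise undefined
```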
@@ -23,7 +23,7 @@ import updateSubtaskById from './task-manager/update-subtask-by-id.js';
 import removeTask from './task-manager/remove-task.js';
 import taskExists from './task-manager/task-exists.js';
 import isTaskDependentOn from './task-manager/is-task-dependent.js';
+import { readComplexityReport } from './utils.js';
 // Export task manager functions
 export {
 	parsePRD,
@@ -45,5 +45,6 @@ export {
 	removeTask,
 	findTaskById,
 	taskExists,
-	isTaskDependentOn
+	isTaskDependentOn,
+	readComplexityReport
 };
@@ -1,3 +1,6 @@
|
|||||||
|
import { log } from '../utils.js';
|
||||||
|
import { addComplexityToTask } from '../utils.js';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return the next work item:
|
* Return the next work item:
|
||||||
* • Prefer an eligible SUBTASK that belongs to any parent task
|
* • Prefer an eligible SUBTASK that belongs to any parent task
|
||||||
@@ -15,9 +18,10 @@
|
|||||||
* ─ parentId → number (present only when it's a subtask)
|
* ─ parentId → number (present only when it's a subtask)
|
||||||
*
|
*
|
||||||
* @param {Object[]} tasks – full array of top-level tasks, each may contain .subtasks[]
|
* @param {Object[]} tasks – full array of top-level tasks, each may contain .subtasks[]
|
||||||
|
* @param {Object} [complexityReport=null] - Optional complexity report object
|
||||||
* @returns {Object|null} – next work item or null if nothing is eligible
|
* @returns {Object|null} – next work item or null if nothing is eligible
|
||||||
*/
|
*/
|
||||||
function findNextTask(tasks) {
|
function findNextTask(tasks, complexityReport = null) {
|
||||||
// ---------- helpers ----------------------------------------------------
|
// ---------- helpers ----------------------------------------------------
|
||||||
const priorityValues = { high: 3, medium: 2, low: 1 };
|
const priorityValues = { high: 3, medium: 2, low: 1 };
|
||||||
|
|
||||||
@@ -91,7 +95,14 @@ function findNextTask(tasks) {
|
|||||||
if (aPar !== bPar) return aPar - bPar;
|
if (aPar !== bPar) return aPar - bPar;
|
||||||
return aSub - bSub;
|
return aSub - bSub;
|
||||||
});
|
});
|
||||||
return candidateSubtasks[0];
|
const nextTask = candidateSubtasks[0];
|
||||||
|
|
||||||
|
// Add complexity to the task before returning
|
||||||
|
if (nextTask && complexityReport) {
|
||||||
|
addComplexityToTask(nextTask, complexityReport);
|
||||||
|
}
|
||||||
|
|
||||||
|
return nextTask;
|
||||||
}
|
}
|
||||||
|
|
||||||
// ---------- 2) fall back to top-level tasks (original logic) ------------
|
// ---------- 2) fall back to top-level tasks (original logic) ------------
|
||||||
@@ -116,6 +127,11 @@ function findNextTask(tasks) {
|
|||||||
return a.id - b.id;
|
return a.id - b.id;
|
||||||
})[0];
|
})[0];
|
||||||
|
|
||||||
|
// Add complexity to the task before returning
|
||||||
|
if (nextTask && complexityReport) {
|
||||||
|
addComplexityToTask(nextTask, complexityReport);
|
||||||
|
}
|
||||||
|
|
||||||
return nextTask;
|
return nextTask;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
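The two `utils.js` helpers leaned on here, `readComplexityReport` and `addComplexityToTask`, aren't part of this diff, so the following is only a hedged sketch of the behavior the call sites imply (read the report JSON if present, then copy the matching score onto a task by id); the field names inside each analysis entry are assumptions:

```javascript
// Hedged sketch only: the real implementations live in scripts/modules/utils.js
// and are not shown in this compare view.
import fs from 'fs';

export function readComplexityReport(reportPath) {
	try {
		if (!reportPath || !fs.existsSync(reportPath)) return null;
		return JSON.parse(fs.readFileSync(reportPath, 'utf8'));
	} catch {
		return null;
	}
}

export function addComplexityToTask(task, complexityReport) {
	// complexityAnalysis is the array the list/next views check for; the entry
	// field names (taskId, complexityScore) are assumed here.
	const entry = complexityReport?.complexityAnalysis?.find(
		(a) => a.taskId === task.id
	);
	if (entry) {
		task.complexityScore = entry.complexityScore;
	}
}
```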
@@ -2,13 +2,20 @@ import chalk from 'chalk';
 import boxen from 'boxen';
 import Table from 'cli-table3';

-import { log, readJSON, truncate } from '../utils.js';
+import {
+	log,
+	readJSON,
+	truncate,
+	readComplexityReport,
+	addComplexityToTask
+} from '../utils.js';
 import findNextTask from './find-next-task.js';

 import {
 	displayBanner,
 	getStatusWithColor,
 	formatDependenciesWithStatus,
+	getComplexityWithColor,
 	createProgressBar
 } from '../ui.js';

@@ -16,6 +23,7 @@ import {
 * List all tasks
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} statusFilter - Filter by status
+ * @param {string} reportPath - Path to the complexity report
 * @param {boolean} withSubtasks - Whether to show subtasks
 * @param {string} outputFormat - Output format (text or json)
 * @returns {Object} - Task list result for json format
@@ -23,6 +31,7 @@ import {
 function listTasks(
 	tasksPath,
 	statusFilter,
+	reportPath = null,
 	withSubtasks = false,
 	outputFormat = 'text'
 ) {
@@ -37,6 +46,13 @@ function listTasks(
 		throw new Error(`No valid tasks found in ${tasksPath}`);
 	}

+	// Add complexity scores to tasks if report exists
+	const complexityReport = readComplexityReport(reportPath);
+	// Apply complexity scores to tasks
+	if (complexityReport && complexityReport.complexityAnalysis) {
+		data.tasks.forEach((task) => addComplexityToTask(task, complexityReport));
+	}
+
 	// Filter tasks by status if specified
 	const filteredTasks =
 		statusFilter && statusFilter.toLowerCase() !== 'all' // <-- Added check for 'all'
@@ -257,8 +273,8 @@ function listTasks(
 	);
 	const avgDependenciesPerTask = totalDependencies / data.tasks.length;

-	// Find next task to work on
-	const nextItem = findNextTask(data.tasks);
+	// Find next task to work on, passing the complexity report
+	const nextItem = findNextTask(data.tasks, complexityReport);

 	// Get terminal width - more reliable method
 	let terminalWidth;
@@ -301,8 +317,11 @@ function listTasks(
 		`${chalk.blue('•')} ${chalk.white('Avg dependencies per task:')} ${avgDependenciesPerTask.toFixed(1)}\n\n` +
 		chalk.cyan.bold('Next Task to Work On:') +
 		'\n' +
-		`ID: ${chalk.cyan(nextItem ? nextItem.id : 'N/A')} - ${nextItem ? chalk.white.bold(truncate(nextItem.title, 40)) : chalk.yellow('No task available')}\n` +
-		`Priority: ${nextItem ? chalk.white(nextItem.priority || 'medium') : ''} Dependencies: ${nextItem ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true) : ''}`;
+		`ID: ${chalk.cyan(nextItem ? nextItem.id : 'N/A')} - ${nextItem ? chalk.white.bold(truncate(nextItem.title, 40)) : chalk.yellow('No task available')}
+` +
+		`Priority: ${nextItem ? chalk.white(nextItem.priority || 'medium') : ''} Dependencies: ${nextItem ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : ''}
+` +
+		`Complexity: ${nextItem && nextItem.complexityScore ? getComplexityWithColor(nextItem.complexityScore) : chalk.gray('N/A')}`;

 	// Calculate width for side-by-side display
 	// Box borders, padding take approximately 4 chars on each side
@@ -412,9 +431,16 @@ function listTasks(
 	// Make dependencies column smaller as requested (-20%)
 	const depsWidthPct = 20;

+	const complexityWidthPct = 10;
+
 	// Calculate title/description width as remaining space (+20% from dependencies reduction)
 	const titleWidthPct =
-		100 - idWidthPct - statusWidthPct - priorityWidthPct - depsWidthPct;
+		100 -
+		idWidthPct -
+		statusWidthPct -
+		priorityWidthPct -
+		depsWidthPct -
+		complexityWidthPct;

 	// Allow 10 characters for borders and padding
 	const availableWidth = terminalWidth - 10;
@@ -424,6 +450,9 @@ function listTasks(
 	const statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));
 	const priorityWidth = Math.floor(availableWidth * (priorityWidthPct / 100));
 	const depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));
+	const complexityWidth = Math.floor(
+		availableWidth * (complexityWidthPct / 100)
+	);
 	const titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));

 	// Create a table with correct borders and spacing
@@ -433,9 +462,17 @@ function listTasks(
 			chalk.cyan.bold('Title'),
 			chalk.cyan.bold('Status'),
 			chalk.cyan.bold('Priority'),
-			chalk.cyan.bold('Dependencies')
+			chalk.cyan.bold('Dependencies'),
+			chalk.cyan.bold('Complexity')
+		],
+		colWidths: [
+			idWidth,
+			titleWidth,
+			statusWidth,
+			priorityWidth,
+			depsWidth,
+			complexityWidth // Added complexity column width
 		],
-		colWidths: [idWidth, titleWidth, statusWidth, priorityWidth, depsWidth],
 		style: {
 			head: [], // No special styling for header
 			border: [], // No special styling for border
@@ -454,7 +491,8 @@ function listTasks(
 			depText = formatDependenciesWithStatus(
 				task.dependencies,
 				data.tasks,
-				true
+				true,
+				complexityReport
 			);
 		} else {
 			depText = chalk.gray('None');
@@ -480,7 +518,10 @@ function listTasks(
 			truncate(cleanTitle, titleWidth - 3),
 			status,
 			priorityColor(truncate(task.priority || 'medium', priorityWidth - 2)),
-			depText // No truncation for dependencies
+			depText,
+			task.complexityScore
+				? getComplexityWithColor(task.complexityScore)
+				: chalk.gray('N/A')
 		]);

 		// Add subtasks if requested
@@ -516,6 +557,8 @@ function listTasks(
 					// Default to regular task dependency
 					const depTask = data.tasks.find((t) => t.id === depId);
 					if (depTask) {
+						// Add complexity to depTask before checking status
+						addComplexityToTask(depTask, complexityReport);
 						const isDone =
 							depTask.status === 'done' || depTask.status === 'completed';
 						const isInProgress = depTask.status === 'in-progress';
@@ -541,7 +584,10 @@ function listTasks(
 					chalk.dim(`└─ ${truncate(subtask.title, titleWidth - 5)}`),
 					getStatusWithColor(subtask.status, true),
 					chalk.dim('-'),
-					subtaskDepText // No truncation for dependencies
+					subtaskDepText,
+					subtask.complexityScore
+						? chalk.gray(`${subtask.complexityScore}`)
+						: chalk.gray('N/A')
 				]);
 			});
 		}
@@ -597,6 +643,8 @@ function listTasks(
 		subtasksSection = `\n\n${chalk.white.bold('Subtasks:')}\n`;
 		subtasksSection += parentTaskForSubtasks.subtasks
 			.map((subtask) => {
+				// Add complexity to subtask before display
+				addComplexityToTask(subtask, complexityReport);
 				// Using a more simplified format for subtask status display
 				const status = subtask.status || 'pending';
 				const statusColors = {
@@ -625,8 +673,8 @@ function listTasks(
 		'\n\n' +
 		// Use nextItem.priority, nextItem.status, nextItem.dependencies
 		`${chalk.white('Priority:')} ${priorityColors[nextItem.priority || 'medium'](nextItem.priority || 'medium')} ${chalk.white('Status:')} ${getStatusWithColor(nextItem.status, true)}\n` +
-		`${chalk.white('Dependencies:')} ${nextItem.dependencies && nextItem.dependencies.length > 0 ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true) : chalk.gray('None')}\n\n` +
-		// Use nextItem.description (Note: findNextTask doesn't return description, need to fetch original task/subtask for this)
+		`${chalk.white('Dependencies:')} ${nextItem.dependencies && nextItem.dependencies.length > 0 ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : chalk.gray('None')}\n\n` +
+		// Use nextTask.description (Note: findNextTask doesn't return description, need to fetch original task/subtask for this)
 		// *** Fetching original item for description and details ***
 		`${chalk.white('Description:')} ${getWorkItemDescription(nextItem, data.tasks)}` +
 		subtasksSection + // <-- Subtasks are handled above now
@@ -8,6 +8,10 @@ import { validateTaskDependencies } from '../dependency-manager.js';
 import { getDebugFlag } from '../config-manager.js';
 import updateSingleTaskStatus from './update-single-task-status.js';
 import generateTaskFiles from './generate-task-files.js';
+import {
+isValidTaskStatus,
+TASK_STATUS_OPTIONS
+} from '../../../src/constants/task-status.js';

 /**
 * Set the status of a task
@@ -19,6 +23,11 @@ import generateTaskFiles from './generate-task-files.js';
 */
 async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {
 try {
+if (!isValidTaskStatus(newStatus)) {
+throw new Error(
+`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
+);
+}
 // Determine if we're in MCP mode by checking for mcpLog
 const isMcpMode = !!options?.mcpLog;
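Both status-setting hunks validate against a shared constants module that is not part of this excerpt. The sketch below is only a hedged guess at its shape: the export names (`TASK_STATUS_OPTIONS`, `isValidTaskStatus`) come from the imports in the hunk above, while the concrete status values are illustrative placeholders.

```js
// Hypothetical sketch of src/constants/task-status.js — names from the diff,
// values illustrative only.
export const TASK_STATUS_OPTIONS = ['pending', 'in-progress', 'done', 'deferred'];

export function isValidTaskStatus(status) {
	// A simple membership check is all the error message above needs:
	// `Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
	return TASK_STATUS_OPTIONS.includes(status);
}
```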
@@ -1,6 +1,7 @@
 import chalk from 'chalk';

 import { log } from '../utils.js';
+import { isValidTaskStatus } from '../../../src/constants/task-status.js';

 /**
 * Update the status of a single task

@@ -17,6 +18,12 @@ async function updateSingleTaskStatus(
 data,
 showUi = true
 ) {
+if (!isValidTaskStatus(newStatus)) {
+throw new Error(
+`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
+);
+}
+
 // Check if it's a subtask (e.g., "1.2")
 if (taskIdInput.includes('.')) {
 const [parentId, subtaskId] = taskIdInput
@@ -16,10 +16,15 @@ import {
 truncate,
 isSilentMode
 } from './utils.js';
-import path from 'path';
 import fs from 'fs';
-import { findNextTask, analyzeTaskComplexity } from './task-manager.js';
+import {
+findNextTask,
+analyzeTaskComplexity,
+readComplexityReport
+} from './task-manager.js';
 import { getProjectName, getDefaultSubtasks } from './config-manager.js';
+import { TASK_STATUS_OPTIONS } from '../../src/constants/task-status.js';
+import { getTaskMasterVersion } from '../../src/utils/getVersion.js';

 // Create a color gradient for the banner
 const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);

@@ -46,17 +51,7 @@ function displayBanner() {
 );

 // Read version directly from package.json
-let version = 'unknown'; // Initialize with a default
-try {
-const packageJsonPath = path.join(process.cwd(), 'package.json');
-if (fs.existsSync(packageJsonPath)) {
-const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
-version = packageJson.version;
-}
-} catch (error) {
-// Silently fall back to default version
-log('warn', 'Could not read package.json for version info.');
-}
+const version = getTaskMasterVersion();

 console.log(
 boxen(
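The banner hunk above replaces the cwd-relative package.json lookup with a `getTaskMasterVersion()` helper imported from `src/utils/getVersion.js`. That helper is not shown in this excerpt; a minimal sketch of what such a helper could look like, assuming it resolves package.json relative to the module rather than `process.cwd()`, is:

```js
// Hedged sketch only — the real src/utils/getVersion.js may differ.
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';

export function getTaskMasterVersion() {
	try {
		// Resolve relative to this module so the version is correct even when
		// the CLI runs from an arbitrary working directory.
		const pkgPath = path.join(
			path.dirname(fileURLToPath(import.meta.url)),
			'..',
			'..',
			'package.json'
		);
		return JSON.parse(fs.readFileSync(pkgPath, 'utf8')).version;
	} catch {
		return 'unknown'; // Same fallback the old inline code used.
	}
}
```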
@@ -273,12 +268,14 @@ function getStatusWithColor(status, forTable = false) {
 * @param {Array} dependencies - Array of dependency IDs
 * @param {Array} allTasks - Array of all tasks
 * @param {boolean} forConsole - Whether the output is for console display
+* @param {Object|null} complexityReport - Optional pre-loaded complexity report
 * @returns {string} Formatted dependencies string
 */
 function formatDependenciesWithStatus(
 dependencies,
 allTasks,
-forConsole = false
+forConsole = false,
+complexityReport = null // Add complexityReport parameter
 ) {
 if (
 !dependencies ||

@@ -342,7 +339,11 @@ function formatDependenciesWithStatus(
 typeof depId === 'string' ? parseInt(depId, 10) : depId;

 // Look up the task using the numeric ID
-const depTaskResult = findTaskById(allTasks, numericDepId);
+const depTaskResult = findTaskById(
+allTasks,
+numericDepId,
+complexityReport
+);
 const depTask = depTaskResult.task; // Access the task object from the result

 if (!depTask) {
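Because the new `complexityReport` parameter defaults to `null`, existing three-argument callers keep working and passing a pre-loaded report is opt-in. A hedged usage sketch (variable names illustrative):

```js
// Both call shapes are valid after this change.
const plain = formatDependenciesWithStatus(task.dependencies, data.tasks, true);
const withScores = formatDependenciesWithStatus(
	task.dependencies,
	data.tasks,
	true,
	complexityReport // pre-loaded via readComplexityReport(...)
);
```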
@@ -458,7 +459,7 @@ function displayHelp() {
 {
 name: 'set-status',
 args: '--id=<id> --status=<status>',
-desc: 'Update task status (done, pending, etc.)'
+desc: `Update task status (${TASK_STATUS_OPTIONS.join(', ')})`
 },
 {
 name: 'update',
@@ -761,7 +762,7 @@ function truncateString(str, maxLength) {
 * Display the next task to work on
 * @param {string} tasksPath - Path to the tasks.json file
 */
-async function displayNextTask(tasksPath) {
+async function displayNextTask(tasksPath, complexityReportPath = null) {
 displayBanner();

 // Read the tasks file

@@ -771,8 +772,11 @@ async function displayNextTask(tasksPath) {
 process.exit(1);
 }

+// Read complexity report once
+const complexityReport = readComplexityReport(complexityReportPath);
+
 // Find the next task
-const nextTask = findNextTask(data.tasks);
+const nextTask = findNextTask(data.tasks, complexityReport);

 if (!nextTask) {
 console.log(
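`displayNextTask` gains an optional `complexityReportPath` with a `null` default, so the old single-argument call keeps its behavior. A hedged sketch of the two call shapes (the paths below are placeholders, not taken from the diff):

```js
// Placeholder paths — the real values come from the CLI/MCP layer.
await displayNextTask('tasks/tasks.json'); // no report: complexity renders as N/A
await displayNextTask('tasks/tasks.json', 'scripts/task-complexity-report.json');
```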
@@ -809,12 +813,7 @@ async function displayNextTask(tasksPath) {
 'padding-bottom': 0,
 compact: true
 },
-chars: {
-mid: '',
-'left-mid': '',
-'mid-mid': '',
-'right-mid': ''
-},
+chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },
 colWidths: [15, Math.min(75, process.stdout.columns - 20 || 60)],
 wordWrap: true
 });

@@ -838,7 +837,18 @@ async function displayNextTask(tasksPath) {
 ],
 [
 chalk.cyan.bold('Dependencies:'),
-formatDependenciesWithStatus(nextTask.dependencies, data.tasks, true)
+formatDependenciesWithStatus(
+nextTask.dependencies,
+data.tasks,
+true,
+complexityReport
+)
+],
+[
+chalk.cyan.bold('Complexity:'),
+nextTask.complexityScore
+? getComplexityWithColor(nextTask.complexityScore)
+: chalk.gray('N/A')
 ],
 [chalk.cyan.bold('Description:'), nextTask.description]
 );
@@ -902,12 +912,7 @@ async function displayNextTask(tasksPath) {
 'padding-bottom': 0,
 compact: true
 },
-chars: {
-mid: '',
-'left-mid': '',
-'mid-mid': '',
-'right-mid': ''
-},
+chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },
 wordWrap: true
 });

@@ -1011,7 +1016,12 @@ async function displayNextTask(tasksPath) {
 * @param {string|number} taskId - The ID of the task to display
 * @param {string} [statusFilter] - Optional status to filter subtasks by
 */
-async function displayTaskById(tasksPath, taskId, statusFilter = null) {
+async function displayTaskById(
+tasksPath,
+taskId,
+complexityReportPath = null,
+statusFilter = null
+) {
 displayBanner();

 // Read the tasks file
@@ -1021,11 +1031,15 @@ async function displayTaskById(tasksPath, taskId, statusFilter = null) {
 process.exit(1);
 }

+// Read complexity report once
+const complexityReport = readComplexityReport(complexityReportPath);
+
 // Find the task by ID, applying the status filter if provided
 // Returns { task, originalSubtaskCount, originalSubtasks }
 const { task, originalSubtaskCount, originalSubtasks } = findTaskById(
 data.tasks,
 taskId,
+complexityReport,
 statusFilter
 );

@@ -1080,6 +1094,12 @@ async function displayTaskById(tasksPath, taskId, statusFilter = null) {
 chalk.cyan.bold('Status:'),
 getStatusWithColor(task.status || 'pending', true)
 ],
+[
+chalk.cyan.bold('Complexity:'),
+task.complexityScore
+? getComplexityWithColor(task.complexityScore)
+: chalk.gray('N/A')
+],
 [
 chalk.cyan.bold('Description:'),
 task.description || 'No description provided.'
@@ -1158,7 +1178,18 @@ async function displayTaskById(tasksPath, taskId, statusFilter = null) {
 [chalk.cyan.bold('Priority:'), priorityColor(task.priority || 'medium')],
 [
 chalk.cyan.bold('Dependencies:'),
-formatDependenciesWithStatus(task.dependencies, data.tasks, true)
+formatDependenciesWithStatus(
+task.dependencies,
+data.tasks,
+true,
+complexityReport
+)
+],
+[
+chalk.cyan.bold('Complexity:'),
+task.complexityScore
+? getComplexityWithColor(task.complexityScore)
+: chalk.gray('N/A')
 ],
 [chalk.cyan.bold('Description:'), task.description]
 );
@@ -275,6 +275,22 @@ function findTaskInComplexityReport(report, taskId) {
 return report.complexityAnalysis.find((task) => task.taskId === taskId);
 }

+function addComplexityToTask(task, complexityReport) {
+let taskId;
+if (task.isSubtask) {
+taskId = task.parentTask.id;
+} else if (task.parentId) {
+taskId = task.parentId;
+} else {
+taskId = task.id;
+}
+
+const taskAnalysis = findTaskInComplexityReport(complexityReport, taskId);
+if (taskAnalysis) {
+task.complexityScore = taskAnalysis.complexityScore;
+}
+}
+
 /**
 * Checks if a task exists in the tasks array
 * @param {Array} tasks - The tasks array

@@ -325,10 +341,17 @@ function formatTaskId(id) {
 * Finds a task by ID in the tasks array. Optionally filters subtasks by status.
 * @param {Array} tasks - The tasks array
 * @param {string|number} taskId - The task ID to find
+* @param {Object|null} complexityReport - Optional pre-loaded complexity report
+* @returns {Object|null} The task object or null if not found
 * @param {string} [statusFilter] - Optional status to filter subtasks by
 * @returns {{task: Object|null, originalSubtaskCount: number|null}} The task object (potentially with filtered subtasks) and the original subtask count if filtered, or nulls if not found.
 */
-function findTaskById(tasks, taskId, statusFilter = null) {
+function findTaskById(
+tasks,
+taskId,
+complexityReport = null,
+statusFilter = null
+) {
 if (!taskId || !tasks || !Array.isArray(tasks)) {
 return { task: null, originalSubtaskCount: null };
 }
@@ -356,10 +379,17 @@ function findTaskById(tasks, taskId, statusFilter = null) {
 subtask.isSubtask = true;
 }

-// Return the found subtask (or null) and null for originalSubtaskCount
+// If we found a task, check for complexity data
+if (subtask && complexityReport) {
+addComplexityToTask(subtask, complexityReport);
+}
+
 return { task: subtask || null, originalSubtaskCount: null };
 }

+let taskResult = null;
+let originalSubtaskCount = null;
+
 // Find the main task
 const id = parseInt(taskId, 10);
 const task = tasks.find((t) => t.id === id) || null;

@@ -369,6 +399,8 @@ function findTaskById(tasks, taskId, statusFilter = null) {
 return { task: null, originalSubtaskCount: null };
 }

+taskResult = task;
+
 // If task found and statusFilter provided, filter its subtasks
 if (statusFilter && task.subtasks && Array.isArray(task.subtasks)) {
 const originalSubtaskCount = task.subtasks.length;

@@ -379,12 +411,18 @@ function findTaskById(tasks, taskId, statusFilter = null) {
 subtask.status &&
 subtask.status.toLowerCase() === statusFilter.toLowerCase()
 );
-// Return the filtered task and the original count
-return { task: filteredTask, originalSubtaskCount: originalSubtaskCount };
+taskResult = filteredTask;
+originalSubtaskCount = originalSubtaskCount;
 }

-// Return original task and null count if no filter or no subtasks
-return { task: task, originalSubtaskCount: null };
+// If task found and complexityReport provided, add complexity data
+if (taskResult && complexityReport) {
+addComplexityToTask(taskResult, complexityReport);
+}
+
+// Return the found task and original subtask count
+return { task: taskResult, originalSubtaskCount };
 }

 /**
@@ -524,10 +562,11 @@ export {
 findCycles,
 toKebabCase,
 detectCamelCaseFlags,
-enableSilentMode,
 disableSilentMode,
-isSilentMode,
-resolveEnvVariable,
+enableSilentMode,
 getTaskManager,
+isSilentMode,
+addComplexityToTask,
+resolveEnvVariable,
 findProjectRoot
 };
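Taken together, the utils hunks above let a caller load the complexity report once and have `findTaskById` attach a `complexityScore` to whatever it returns. A hedged usage sketch, reusing only identifiers that appear in those hunks (the report path and task ID are placeholders):

```js
// Sketch only — assumes data.tasks and complexityReportPath already exist.
const complexityReport = readComplexityReport(complexityReportPath);
const { task } = findTaskById(data.tasks, 42, complexityReport);
if (task && task.complexityScore !== undefined) {
	console.log(`Complexity: ${task.complexityScore}`);
}
```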
@@ -5,7 +5,7 @@
 * using the Vercel AI SDK.
 */
 import { createAnthropic } from '@ai-sdk/anthropic';
-import { generateText, streamText, generateObject, streamObject } from 'ai';
+import { generateText, streamText, generateObject } from 'ai';
 import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible

 // TODO: Implement standardized functions for generateText, streamText, generateObject

@@ -17,7 +17,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils is acces
 // Remove the global variable and caching logic
 // let anthropicClient;

-function getClient(apiKey) {
+function getClient(apiKey, baseUrl) {
 if (!apiKey) {
 // In a real scenario, this would use the config resolver.
 // Throwing error here if key isn't passed for simplicity.

@@ -30,14 +30,12 @@ function getClient(apiKey) {
 // Create and return a new instance directly with standard version header
 return createAnthropic({
 apiKey: apiKey,
-baseURL: 'https://api.anthropic.com/v1',
+...(baseUrl && { baseURL: baseUrl }),
 // Use standard version header instead of beta
 headers: {
 'anthropic-beta': 'output-128k-2025-02-19'
 }
 });
-// }
-// return anthropicClient;
 }

 // --- Standardized Service Function Implementations ---
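The `...(baseUrl && { baseURL: baseUrl })` spread used in this hunk, and repeated in the other provider modules below, only adds a `baseURL` key when a value is actually supplied, so each SDK falls back to its default endpoint otherwise. A small, self-contained illustration of that pattern (values are illustrative):

```js
function clientOptions(apiKey, baseUrl) {
	// Spreading `false`/`undefined` is a no-op, so baseURL is only set when provided.
	return { apiKey, ...(baseUrl && { baseURL: baseUrl }) };
}

clientOptions('key'); //=> { apiKey: 'key' }
clientOptions('key', 'https://proxy.internal/v1'); //=> { apiKey: 'key', baseURL: 'https://proxy.internal/v1' }
```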
@@ -51,6 +49,7 @@ function getClient(apiKey) {
 * @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
+* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
 * @returns {Promise<string>} The generated text content.
 * @throws {Error} If the API call fails.
 */

@@ -59,11 +58,12 @@ export async function generateAnthropicText({
 modelId,
 messages,
 maxTokens,
-temperature
+temperature,
+baseUrl
 }) {
 log('debug', `Generating Anthropic text with model: ${modelId}`);
 try {
-const client = getClient(apiKey);
+const client = getClient(apiKey, baseUrl);
 const result = await generateText({
 model: client(modelId),
 messages: messages,

@@ -93,6 +93,7 @@ export async function generateAnthropicText({
 * @param {Array<object>} params.messages - The messages array.
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
+* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
 * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
 * @throws {Error} If the API call fails to initiate the stream.
 */
@@ -101,20 +102,20 @@ export async function streamAnthropicText({
 modelId,
 messages,
 maxTokens,
-temperature
+temperature,
+baseUrl
 }) {
 log('debug', `Streaming Anthropic text with model: ${modelId}`);
 try {
-const client = getClient(apiKey);
+const client = getClient(apiKey, baseUrl);

-// --- DEBUG LOGGING --- >>
 log(
 'debug',
 '[streamAnthropicText] Parameters received by streamText:',
 JSON.stringify(
 {
-modelId: modelId, // Log modelId being used
-messages: messages, // Log the messages array
+modelId: modelId,
+messages: messages,
 maxTokens: maxTokens,
 temperature: temperature
 },

@@ -122,25 +123,19 @@ export async function streamAnthropicText({
 2
 )
 );
-// --- << DEBUG LOGGING ---

 const stream = await streamText({
 model: client(modelId),
 messages: messages,
 maxTokens: maxTokens,
 temperature: temperature
-// Beta header moved to client initialization
 // TODO: Add other relevant parameters
 });

 // *** RETURN THE FULL STREAM OBJECT, NOT JUST stream.textStream ***
 return stream;
 } catch (error) {
-log(
-'error',
-`Anthropic streamText failed: ${error.message}`,
-error.stack // Log stack trace for more details
-);
+log('error', `Anthropic streamText failed: ${error.message}`, error.stack);
 throw error;
 }
 }

@@ -160,6 +155,7 @@ export async function streamAnthropicText({
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
 * @param {number} [params.maxRetries] - Max retries for validation/generation.
+* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If generation or validation fails.
 */
@@ -171,24 +167,22 @@ export async function generateAnthropicObject({
 objectName = 'generated_object',
 maxTokens,
 temperature,
-maxRetries = 3
+maxRetries = 3,
+baseUrl
 }) {
 log(
 'debug',
 `Generating Anthropic object ('${objectName}') with model: ${modelId}`
 );
 try {
-const client = getClient(apiKey);
+const client = getClient(apiKey, baseUrl);

-// Log basic debug info
 log(
 'debug',
 `Using maxTokens: ${maxTokens}, temperature: ${temperature}, model: ${modelId}`
 );

 const result = await generateObject({
 model: client(modelId),
-mode: 'tool', // Anthropic generally uses 'tool' mode for structured output
+mode: 'tool',
 schema: schema,
 messages: messages,
 tool: {

@@ -199,14 +193,12 @@ export async function generateAnthropicObject({
 temperature: temperature,
 maxRetries: maxRetries
 });

 log(
 'debug',
 `Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
 );
 return result.object;
 } catch (error) {
-// Simple error logging
 log(
 'error',
 `Anthropic generateObject ('${objectName}') failed: ${error.message}`
@@ -12,6 +12,16 @@ import { log } from '../../scripts/modules/utils.js'; // Import logging utility
 const DEFAULT_MODEL = 'gemini-2.0-pro'; // Or a suitable default
 const DEFAULT_TEMPERATURE = 0.2; // Or a suitable default

+function getClient(apiKey, baseUrl) {
+if (!apiKey) {
+throw new Error('Google API key is required.');
+}
+return createGoogleGenerativeAI({
+apiKey: apiKey,
+...(baseUrl && { baseURL: baseUrl })
+});
+}
+
 /**
 * Generates text using a Google AI model.
 *

@@ -29,7 +39,8 @@ async function generateGoogleText({
 modelId = DEFAULT_MODEL,
 temperature = DEFAULT_TEMPERATURE,
 messages,
-maxTokens // Note: Vercel SDK might handle this differently, needs verification
+maxTokens,
+baseUrl
 }) {
 if (!apiKey) {
 throw new Error('Google API key is required.');

@@ -37,28 +48,21 @@ async function generateGoogleText({
 log('info', `Generating text with Google model: ${modelId}`);

 try {
-// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
-const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
-// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
-const model = googleProvider(modelId); // Correct model retrieval
+const googleProvider = getClient(apiKey, baseUrl);
+const model = googleProvider(modelId);

-// Construct payload suitable for Vercel SDK's generateText
-// Note: The exact structure might depend on how messages are passed
 const result = await generateText({
-model, // Pass the model instance
-messages, // Pass the messages array directly
+model,
+messages,
 temperature,
-maxOutputTokens: maxTokens // Map to correct Vercel SDK param if available
+maxOutputTokens: maxTokens
 });
-// Assuming result structure provides text directly or within a property
-return result.text; // Adjust based on actual SDK response
+return result.text;
 } catch (error) {
 log(
 'error',
 `Error generating text with Google (${modelId}): ${error.message}`
 );
-throw error; // Re-throw for unified service handler
+throw error;
 }
 }

@@ -79,7 +83,8 @@ async function streamGoogleText({
 modelId = DEFAULT_MODEL,
 temperature = DEFAULT_TEMPERATURE,
 messages,
-maxTokens
+maxTokens,
+baseUrl
 }) {
 if (!apiKey) {
 throw new Error('Google API key is required.');

@@ -87,19 +92,15 @@ async function streamGoogleText({
 log('info', `Streaming text with Google model: ${modelId}`);

 try {
-// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
-const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
-// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
-const model = googleProvider(modelId); // Correct model retrieval
+const googleProvider = getClient(apiKey, baseUrl);
+const model = googleProvider(modelId);

 const stream = await streamText({
-model, // Pass the model instance
+model,
 messages,
 temperature,
 maxOutputTokens: maxTokens
 });
-return stream; // Return the stream directly
+return stream;
 } catch (error) {
 log(
 'error',

@@ -130,7 +131,8 @@ async function generateGoogleObject({
 messages,
 schema,
 objectName, // Note: Vercel SDK might use this differently or not at all
-maxTokens
+maxTokens,
+baseUrl
 }) {
 if (!apiKey) {
 throw new Error('Google API key is required.');

@@ -138,23 +140,16 @@ async function generateGoogleObject({
 log('info', `Generating object with Google model: ${modelId}`);

 try {
-// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
-const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
-// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
-const model = googleProvider(modelId); // Correct model retrieval
+const googleProvider = getClient(apiKey, baseUrl);
+const model = googleProvider(modelId);

 const { object } = await generateObject({
-model, // Pass the model instance
+model,
 schema,
 messages,
 temperature,
 maxOutputTokens: maxTokens
-// Note: 'objectName' or 'mode' might not be directly applicable here
-// depending on how `@ai-sdk/google` handles `generateObject`.
-// Check SDK docs if specific tool calling/JSON mode needs explicit setup.
 });
-return object; // Return the parsed object
+return object;
 } catch (error) {
 log(
 'error',
@@ -1,16 +1,26 @@
-import { createOpenAI, openai } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
-import { generateText, streamText, generateObject } from 'ai'; // Import necessary functions from 'ai'
+import { createOpenAI } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
+import { generateObject } from 'ai'; // Import necessary functions from 'ai'
 import { log } from '../../scripts/modules/utils.js';

+function getClient(apiKey, baseUrl) {
+if (!apiKey) {
+throw new Error('OpenAI API key is required.');
+}
+return createOpenAI({
+apiKey: apiKey,
+...(baseUrl && { baseURL: baseUrl })
+});
+}
+
 /**
 * Generates text using OpenAI models via Vercel AI SDK.
 *
-* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
+* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
 * @returns {Promise<string>} The generated text content.
 * @throws {Error} If API call fails.
 */
 export async function generateOpenAIText(params) {
-const { apiKey, modelId, messages, maxTokens, temperature } = params;
+const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
 log('debug', `generateOpenAIText called with model: ${modelId}`);

 if (!apiKey) {

@@ -23,18 +33,15 @@ export async function generateOpenAIText(params) {
 throw new Error('Invalid or empty messages array provided for OpenAI.');
 }

-const openaiClient = createOpenAI({ apiKey });
+const openaiClient = getClient(apiKey, baseUrl);

 try {
 const result = await openaiClient.chat(messages, {
-// Updated: Use openaiClient.chat directly
 model: modelId,
 max_tokens: maxTokens,
 temperature
 });

-// Adjust based on actual Vercel SDK response structure for openaiClient.chat
-// This might need refinement based on testing the SDK's output.
 const textContent = result?.choices?.[0]?.message?.content?.trim();

 if (!textContent) {

@@ -65,12 +72,12 @@ export async function generateOpenAIText(params) {
 /**
 * Streams text using OpenAI models via Vercel AI SDK.
 *
-* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
+* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
 * @returns {Promise<ReadableStream>} A readable stream of text deltas.
 * @throws {Error} If API call fails.
 */
 export async function streamOpenAIText(params) {
-const { apiKey, modelId, messages, maxTokens, temperature } = params;
+const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
 log('debug', `streamOpenAIText called with model: ${modelId}`);

 if (!apiKey) {

@@ -85,12 +92,10 @@ export async function streamOpenAIText(params) {
 );
 }

-const openaiClient = createOpenAI({ apiKey });
+const openaiClient = getClient(apiKey, baseUrl);

 try {
-// Use the streamText function from Vercel AI SDK core
 const stream = await openaiClient.chat.stream(messages, {
-// Updated: Use openaiClient.chat.stream
 model: modelId,
 max_tokens: maxTokens,
 temperature

@@ -100,7 +105,6 @@ export async function streamOpenAIText(params) {
 'debug',
 `OpenAI streamText initiated successfully for model: ${modelId}`
 );
-// The Vercel SDK's streamText should directly return the stream object
 return stream;
 } catch (error) {
 log(
@@ -117,7 +121,7 @@ export async function streamOpenAIText(params) {
 /**
 * Generates structured objects using OpenAI models via Vercel AI SDK.
 *
-* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
+* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature, baseUrl.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If API call fails or object generation fails.
 */

@@ -129,7 +133,8 @@ export async function generateOpenAIObject(params) {
 schema,
 objectName,
 maxTokens,
-temperature
+temperature,
+baseUrl
 } = params;
 log(
 'debug',

@@ -145,10 +150,9 @@ export async function generateOpenAIObject(params) {
 if (!objectName)
 throw new Error('Object name is required for OpenAI object generation.');

-const openaiClient = createOpenAI({ apiKey });
+const openaiClient = getClient(apiKey, baseUrl);

 try {
-// Use the imported generateObject function from 'ai' package
 const result = await generateObject({
 model: openaiClient(modelId),
 schema: schema,
@@ -2,6 +2,14 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
 import { generateText, streamText, generateObject } from 'ai';
 import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in scripts/modules

+function getClient(apiKey, baseUrl) {
+if (!apiKey) throw new Error('OpenRouter API key is required.');
+return createOpenRouter({
+apiKey,
+...(baseUrl && { baseURL: baseUrl })
+});
+}
+
 /**
 * Generates text using an OpenRouter chat model.
 *

@@ -11,6 +19,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in
 * @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
 * @param {number} [params.maxTokens] - Maximum tokens to generate.
 * @param {number} [params.temperature] - Sampling temperature.
+* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
 * @returns {Promise<string>} The generated text content.
 * @throws {Error} If the API call fails.
 */

@@ -20,6 +29,7 @@ async function generateOpenRouterText({
 messages,
 maxTokens,
 temperature,
+baseUrl,
 ...rest // Capture any other Vercel AI SDK compatible parameters
 }) {
 if (!apiKey) throw new Error('OpenRouter API key is required.');

@@ -28,7 +38,7 @@ async function generateOpenRouterText({
 throw new Error('Messages array cannot be empty.');

 try {
-const openrouter = createOpenRouter({ apiKey });
+const openrouter = getClient(apiKey, baseUrl);
 const model = openrouter.chat(modelId); // Assuming chat model

 const { text } = await generateText({

@@ -58,6 +68,7 @@ async function generateOpenRouterText({
 * @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
 * @param {number} [params.maxTokens] - Maximum tokens to generate.
 * @param {number} [params.temperature] - Sampling temperature.
+* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
 * @returns {Promise<ReadableStream<string>>} A readable stream of text deltas.
 * @throws {Error} If the API call fails.
 */

@@ -67,6 +78,7 @@ async function streamOpenRouterText({
 messages,
 maxTokens,
 temperature,
+baseUrl,
 ...rest
 }) {
 if (!apiKey) throw new Error('OpenRouter API key is required.');

@@ -75,7 +87,7 @@ async function streamOpenRouterText({
 throw new Error('Messages array cannot be empty.');

 try {
-const openrouter = createOpenRouter({ apiKey });
+const openrouter = getClient(apiKey, baseUrl);
 const model = openrouter.chat(modelId);

 // Directly return the stream from the Vercel AI SDK function

@@ -108,6 +120,7 @@ async function streamOpenRouterText({
 * @param {number} [params.maxRetries=3] - Max retries for object generation.
 * @param {number} [params.maxTokens] - Maximum tokens.
 * @param {number} [params.temperature] - Temperature.
+* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If the API call fails or validation fails.
 */

@@ -120,6 +133,7 @@ async function generateOpenRouterObject({
 maxRetries = 3,
 maxTokens,
 temperature,
+baseUrl,
 ...rest
 }) {
 if (!apiKey) throw new Error('OpenRouter API key is required.');

@@ -129,7 +143,7 @@ async function generateOpenRouterObject({
 throw new Error('Messages array cannot be empty.');

 try {
-const openrouter = createOpenRouter({ apiKey });
+const openrouter = getClient(apiKey, baseUrl);
 const model = openrouter.chat(modelId);

 const { object } = await generateObject({
@@ -10,13 +10,13 @@ import { log } from '../../scripts/modules/utils.js';

 // --- Client Instantiation ---
 // Similar to Anthropic, this expects the resolved API key to be passed in.
-function getClient(apiKey) {
+function getClient(apiKey, baseUrl) {
 if (!apiKey) {
 throw new Error('Perplexity API key is required.');
 }
-// Create and return a new instance directly
 return createPerplexity({
-apiKey: apiKey
+apiKey: apiKey,
+...(baseUrl && { baseURL: baseUrl })
 });
 }

@@ -31,6 +31,7 @@ function getClient(apiKey) {
 * @param {Array<object>} params.messages - The messages array.
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
+* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
 * @returns {Promise<string>} The generated text content.
 * @throws {Error} If the API call fails.
 */

@@ -39,11 +40,12 @@ export async function generatePerplexityText({
 modelId,
 messages,
 maxTokens,
-temperature
+temperature,
+baseUrl
 }) {
 log('debug', `Generating Perplexity text with model: ${modelId}`);
 try {
-const client = getClient(apiKey);
+const client = getClient(apiKey, baseUrl);
 const result = await generateText({
 model: client(modelId),
 messages: messages,

@@ -70,6 +72,7 @@ export async function generatePerplexityText({
 * @param {Array<object>} params.messages - The messages array.
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
+* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
 * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
 * @throws {Error} If the API call fails to initiate the stream.
 */

@@ -78,11 +81,12 @@ export async function streamPerplexityText({
 modelId,
 messages,
 maxTokens,
-temperature
+temperature,
+baseUrl
 }) {
 log('debug', `Streaming Perplexity text with model: ${modelId}`);
 try {
-const client = getClient(apiKey);
+const client = getClient(apiKey, baseUrl);
 const stream = await streamText({
 model: client(modelId),
 messages: messages,

@@ -112,6 +116,7 @@ export async function streamPerplexityText({
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
 * @param {number} [params.maxRetries] - Max retries for validation/generation.
+* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If generation or validation fails or is unsupported.
 */

@@ -123,7 +128,8 @@ export async function generatePerplexityObject({
 objectName = 'generated_object',
 maxTokens,
 temperature,
-maxRetries = 1 // Lower retries as support might be limited
+maxRetries = 1,
+baseUrl
 }) {
 log(
 'debug',

@@ -134,8 +140,7 @@ export async function generatePerplexityObject({
 'generateObject support for Perplexity might be limited or experimental.'
 );
 try {
-const client = getClient(apiKey);
-// Attempt using generateObject, but be prepared for potential issues
+const client = getClient(apiKey, baseUrl);
 const result = await generateObject({
 model: client(modelId),
 schema: schema,
@@ -9,14 +9,13 @@ import { generateText, streamText, generateObject } from 'ai'; // Only import wh
 import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
 
 // --- Client Instantiation ---
-function getClient(apiKey) {
+function getClient(apiKey, baseUrl) {
 	if (!apiKey) {
 		throw new Error('xAI API key is required.');
 	}
-	// Create and return a new instance directly
 	return createXai({
-		apiKey: apiKey
+		apiKey: apiKey,
+		...(baseUrl && { baseURL: baseUrl })
 	});
 }
 
@@ -31,6 +30,7 @@ function getClient(apiKey) {
  * @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
+ * @param {string} [params.baseUrl] - The base URL for the xAI API.
  * @returns {Promise<string>} The generated text content.
  * @throws {Error} If the API call fails.
  */
@@ -39,13 +39,14 @@ export async function generateXaiText({
 	modelId,
 	messages,
 	maxTokens,
-	temperature
+	temperature,
+	baseUrl
 }) {
 	log('debug', `Generating xAI text with model: ${modelId}`);
 	try {
-		const client = getClient(apiKey);
+		const client = getClient(apiKey, baseUrl);
 		const result = await generateText({
-			model: client(modelId), // Correct model invocation
+			model: client(modelId),
 			messages: messages,
 			maxTokens: maxTokens,
 			temperature: temperature
@@ -70,6 +71,7 @@ export async function generateXaiText({
  * @param {Array<object>} params.messages - The messages array.
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
+ * @param {string} [params.baseUrl] - The base URL for the xAI API.
  * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
  * @throws {Error} If the API call fails to initiate the stream.
  */
@@ -78,18 +80,19 @@ export async function streamXaiText({
 	modelId,
 	messages,
 	maxTokens,
-	temperature
+	temperature,
+	baseUrl
 }) {
 	log('debug', `Streaming xAI text with model: ${modelId}`);
 	try {
-		const client = getClient(apiKey);
+		const client = getClient(apiKey, baseUrl);
 		const stream = await streamText({
-			model: client(modelId), // Correct model invocation
+			model: client(modelId),
 			messages: messages,
 			maxTokens: maxTokens,
 			temperature: temperature
 		});
-		return stream; // Return the full stream object
+		return stream;
 	} catch (error) {
 		log('error', `xAI streamText failed: ${error.message}`, error.stack);
 		throw error;
@@ -110,6 +113,7 @@ export async function streamXaiText({
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
  * @param {number} [params.maxRetries] - Max retries for validation/generation.
+ * @param {string} [params.baseUrl] - The base URL for the xAI API.
  * @returns {Promise<object>} The generated object matching the schema.
  * @throws {Error} If generation or validation fails.
  */
@@ -121,16 +125,17 @@ export async function generateXaiObject({
 	objectName = 'generated_xai_object',
 	maxTokens,
 	temperature,
-	maxRetries = 3
+	maxRetries = 3,
+	baseUrl
 }) {
 	log(
-		'warn', // Log warning as this is likely unsupported
+		'warn',
 		`Attempting to generate xAI object ('${objectName}') with model: ${modelId}. This may not be supported by the provider.`
 	);
 	try {
-		const client = getClient(apiKey);
+		const client = getClient(apiKey, baseUrl);
 		const result = await generateObject({
-			model: client(modelId), // Correct model invocation
+			model: client(modelId),
 			// Note: mode might need adjustment if xAI ever supports object generation differently
 			mode: 'tool',
 			schema: schema,
@@ -153,6 +158,6 @@ export async function generateXaiObject({
 			'error',
 			`xAI generateObject ('${objectName}') failed: ${error.message}. (Likely unsupported by provider)`
 		);
-		throw error; // Re-throw the error
+		throw error;
 	}
 }
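The only behavioral change inside `getClient` is the conditional spread: `baseURL` is forwarded to `createXai` only when a `baseUrl` argument is supplied. A self-contained sketch of that pattern (the function name and values are illustrative, not part of the diff):

```js
// The baseURL key is only added to the options object when baseUrl is truthy;
// spreading a falsy value in an object literal adds no properties.
function buildClientOptions(apiKey, baseUrl) {
	return {
		apiKey,
		...(baseUrl && { baseURL: baseUrl })
	};
}

console.log(buildClientOptions('key-123'));
// -> { apiKey: 'key-123' }
console.log(buildClientOptions('key-123', 'https://example.test/v1'));
// -> { apiKey: 'key-123', baseURL: 'https://example.test/v1' }
```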
32 src/constants/task-status.js (new file)
@@ -0,0 +1,32 @@
+/**
+ * @typedef {'pending' | 'done' | 'in-progress' | 'review' | 'deferred' | 'cancelled'} TaskStatus
+ */
+
+/**
+ * Task status options list
+ * @type {TaskStatus[]}
+ * @description Defines possible task statuses:
+ * - pending: Task waiting to start
+ * - done: Task completed
+ * - in-progress: Task in progress
+ * - review: Task completed and waiting for review
+ * - deferred: Task postponed or paused
+ * - cancelled: Task cancelled and will not be completed
+ */
+export const TASK_STATUS_OPTIONS = [
+	'pending',
+	'done',
+	'in-progress',
+	'review',
+	'deferred',
+	'cancelled'
+];
+
+/**
+ * Check if a given status is a valid task status
+ * @param {string} status - The status to check
+ * @returns {boolean} True if the status is valid, false otherwise
+ */
+export function isValidTaskStatus(status) {
+	return TASK_STATUS_OPTIONS.includes(status);
+}
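A possible consumer of the new constants module, mirroring the validation added to the task-manager tests later in this diff; the `assertValidStatus` helper and its import path are hypothetical:

```js
import {
	isValidTaskStatus,
	TASK_STATUS_OPTIONS
} from '../../src/constants/task-status.js'; // path relative to the calling module

// Hypothetical guard a caller might use before persisting a status change.
function assertValidStatus(status) {
	if (!isValidTaskStatus(status)) {
		throw new Error(
			`Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
		);
	}
}

assertValidStatus('in-progress'); // passes silently
assertValidStatus('Done'); // throws: status values are lowercase and case-sensitive
```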
35 src/utils/getVersion.js (new file)
@@ -0,0 +1,35 @@
+import fs from 'fs';
+import path from 'path';
+import { fileURLToPath } from 'url';
+import { log } from '../../scripts/modules/utils.js';
+
+/**
+ * Reads the version from the nearest package.json relative to this file.
+ * Returns 'unknown' if not found or on error.
+ * @returns {string} The version string or 'unknown'.
+ */
+export function getTaskMasterVersion() {
+	let version = 'unknown';
+	try {
+		// Get the directory of the current module (getPackageVersion.js)
+		const currentModuleFilename = fileURLToPath(import.meta.url);
+		const currentModuleDirname = path.dirname(currentModuleFilename);
+		// Construct the path to package.json relative to this file (../../package.json)
+		const packageJsonPath = path.join(
+			currentModuleDirname,
+			'..',
+			'..',
+			'package.json'
+		);
+
+		if (fs.existsSync(packageJsonPath)) {
+			const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8');
+			const packageJson = JSON.parse(packageJsonContent);
+			version = packageJson.version;
+		}
+	} catch (error) {
+		// Silently fall back to default version
+		log('warn', 'Could not read own package.json for version info.', error);
+	}
+	return version;
+}
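A short usage sketch for the new helper (the import path is illustrative); `getTaskMasterVersion` never throws and falls back to 'unknown' when package.json cannot be read:

```js
import { getTaskMasterVersion } from './src/utils/getVersion.js'; // placeholder path

// e.g. behind a --version flag in the CLI
const version = getTaskMasterVersion();
console.log(`task-master-ai ${version}`);
```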
@@ -3,9 +3,8 @@
  */
 
 import { jest } from '@jest/globals';
-import path from 'path';
+import path, { dirname } from 'path';
 import { fileURLToPath } from 'url';
-import { dirname } from 'path';
 
 // Get the current module's directory
 const __filename = fileURLToPath(import.meta.url);
@@ -27,6 +26,7 @@ const mockReadJSON = jest.fn();
 const mockWriteJSON = jest.fn();
 const mockEnableSilentMode = jest.fn();
 const mockDisableSilentMode = jest.fn();
+const mockReadComplexityReport = jest.fn().mockReturnValue(null);
 
 const mockGetAnthropicClient = jest.fn().mockReturnValue({});
 const mockGetConfiguredAnthropicClient = jest.fn().mockReturnValue({});
@@ -130,6 +130,7 @@ jest.mock('../../../scripts/modules/utils.js', () => ({
 	writeJSON: mockWriteJSON,
 	enableSilentMode: mockEnableSilentMode,
 	disableSilentMode: mockDisableSilentMode,
+	readComplexityReport: mockReadComplexityReport,
 	CONFIG: {
 		model: 'claude-3-7-sonnet-20250219',
 		maxTokens: 64000,
@@ -160,15 +161,6 @@ jest.mock('../../../scripts/modules/task-manager.js', () => ({
 }));
 
 // Import dependencies after mocks are set up
-import fs from 'fs';
-import {
-	readJSON,
-	writeJSON,
-	enableSilentMode,
-	disableSilentMode
-} from '../../../scripts/modules/utils.js';
-import { expandTask } from '../../../scripts/modules/task-manager.js';
-import { findTasksJsonPath } from '../../../mcp-server/src/core/utils/path-utils.js';
 import { sampleTasks } from '../../fixtures/sample-tasks.js';
 
 // Mock logger
@@ -220,6 +212,37 @@ describe('MCP Server Direct Functions', () => {
 	});
 
 	describe('listTasksDirect', () => {
+		// Sample complexity report for testing
+		const mockComplexityReport = {
+			meta: {
+				generatedAt: '2025-03-24T20:01:35.986Z',
+				tasksAnalyzed: 3,
+				thresholdScore: 5,
+				projectName: 'Test Project',
+				usedResearch: false
+			},
+			complexityAnalysis: [
+				{
+					taskId: 1,
+					taskTitle: 'Initialize Project',
+					complexityScore: 3,
+					recommendedSubtasks: 2
+				},
+				{
+					taskId: 2,
+					taskTitle: 'Create Core Functionality',
+					complexityScore: 8,
+					recommendedSubtasks: 5
+				},
+				{
+					taskId: 3,
+					taskTitle: 'Implement UI Components',
+					complexityScore: 6,
+					recommendedSubtasks: 4
+				}
+			]
+		};
+
 		// Test wrapper function that doesn't rely on the actual implementation
 		async function testListTasks(args, mockLogger) {
 			// File not found case
@@ -235,21 +258,35 @@ describe('MCP Server Direct Functions', () => {
 				};
 			}
 
+			// Check for complexity report
+			const complexityReport = mockReadComplexityReport();
+			let tasksData = [...sampleTasks.tasks];
+
+			// Add complexity scores if report exists
+			if (complexityReport && complexityReport.complexityAnalysis) {
+				tasksData = tasksData.map((task) => {
+					const analysis = complexityReport.complexityAnalysis.find(
+						(a) => a.taskId === task.id
+					);
+					if (analysis) {
+						return { ...task, complexityScore: analysis.complexityScore };
+					}
+					return task;
+				});
+			}
+
 			// Success case
 			if (!args.status && !args.withSubtasks) {
 				return {
 					success: true,
 					data: {
-						tasks: sampleTasks.tasks,
+						tasks: tasksData,
 						stats: {
-							total: sampleTasks.tasks.length,
-							completed: sampleTasks.tasks.filter((t) => t.status === 'done')
+							total: tasksData.length,
+							completed: tasksData.filter((t) => t.status === 'done').length,
+							inProgress: tasksData.filter((t) => t.status === 'in-progress')
 								.length,
-							inProgress: sampleTasks.tasks.filter(
-								(t) => t.status === 'in-progress'
-							).length,
-							pending: sampleTasks.tasks.filter((t) => t.status === 'pending')
-								.length
+							pending: tasksData.filter((t) => t.status === 'pending').length
 						}
 					},
 					fromCache: false
@@ -258,16 +295,14 @@ describe('MCP Server Direct Functions', () => {
 
 			// Status filter case
 			if (args.status) {
-				const filteredTasks = sampleTasks.tasks.filter(
-					(t) => t.status === args.status
-				);
+				const filteredTasks = tasksData.filter((t) => t.status === args.status);
 				return {
 					success: true,
 					data: {
 						tasks: filteredTasks,
 						filter: args.status,
 						stats: {
-							total: sampleTasks.tasks.length,
+							total: tasksData.length,
 							filtered: filteredTasks.length
 						}
 					},
@@ -280,10 +315,10 @@ describe('MCP Server Direct Functions', () => {
 			return {
 				success: true,
 				data: {
-					tasks: sampleTasks.tasks,
+					tasks: tasksData,
 					includeSubtasks: true,
 					stats: {
-						total: sampleTasks.tasks.length
+						total: tasksData.length
 					}
 				},
 				fromCache: false
@@ -370,6 +405,29 @@ describe('MCP Server Direct Functions', () => {
 			expect(result.error.code).toBe('FILE_NOT_FOUND_ERROR');
 			expect(mockLogger.error).toHaveBeenCalled();
 		});
+
+		test('should include complexity scores when complexity report exists', async () => {
+			// Arrange
+			mockReadComplexityReport.mockReturnValueOnce(mockComplexityReport);
+			const args = {
+				projectRoot: testProjectRoot,
+				file: testTasksPath,
+				withSubtasks: true
+			};
+
+			// Act
+			const result = await testListTasks(args, mockLogger);
+			// Assert
+			expect(result.success).toBe(true);
+
+			// Check that tasks have complexity scores from the report
+			mockComplexityReport.complexityAnalysis.forEach((analysis) => {
+				const task = result.data.tasks.find((t) => t.id === analysis.taskId);
+				if (task) {
+					expect(task.complexityScore).toBe(analysis.complexityScore);
+				}
+			});
+		});
 	});
 
 	describe('expandTaskDirect', () => {
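Restated outside the test wrapper, the complexity merge above amounts to the following helper (a sketch, not part of the diff): each task is copied and gains a `complexityScore` when a matching `complexityAnalysis` entry exists, and the input is returned unchanged when no report is available.

```js
// Assumed shapes: tasks have a numeric id; the report has a complexityAnalysis
// array whose entries carry taskId and complexityScore.
function mergeComplexityScores(tasks, complexityReport) {
	if (!complexityReport || !complexityReport.complexityAnalysis) {
		return tasks;
	}
	return tasks.map((task) => {
		const analysis = complexityReport.complexityAnalysis.find(
			(a) => a.taskId === task.id
		);
		return analysis
			? { ...task, complexityScore: analysis.complexityScore }
			: task;
	});
}
```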
@@ -9,7 +9,7 @@ process.env.MODEL = 'sonar-pro';
 process.env.MAX_TOKENS = '64000';
 process.env.TEMPERATURE = '0.2';
 process.env.DEBUG = 'false';
-process.env.LOG_LEVEL = 'error'; // Set to error to reduce noise in tests
+process.env.TASKMASTER_LOG_LEVEL = 'error'; // Set to error to reduce noise in tests
 process.env.DEFAULT_SUBTASKS = '5';
 process.env.DEFAULT_PRIORITY = 'medium';
 process.env.PROJECT_NAME = 'Test Project';
@@ -8,6 +8,7 @@ const mockGetResearchModelId = jest.fn();
 const mockGetFallbackProvider = jest.fn();
 const mockGetFallbackModelId = jest.fn();
 const mockGetParametersForRole = jest.fn();
+const mockGetBaseUrlForRole = jest.fn();
 
 jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
 	getMainProvider: mockGetMainProvider,
@@ -16,7 +17,8 @@ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
 	getResearchModelId: mockGetResearchModelId,
 	getFallbackProvider: mockGetFallbackProvider,
 	getFallbackModelId: mockGetFallbackModelId,
-	getParametersForRole: mockGetParametersForRole
+	getParametersForRole: mockGetParametersForRole,
+	getBaseUrlForRole: mockGetBaseUrlForRole
 }));
 
 // Mock AI Provider Modules
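In a test, the new mock can be primed like any other Jest mock before the module under test runs; the URL and the provider mock named in the comment below are placeholders, not part of the diff:

```js
// Make the mocked config-manager report a custom endpoint for the role under test.
mockGetBaseUrlForRole.mockReturnValue('https://custom-endpoint.example/v1');

// A later assertion could then verify the provider wrapper received it, e.g.:
// expect(mockGenerateXaiText).toHaveBeenCalledWith(
//   expect.objectContaining({ baseUrl: 'https://custom-endpoint.example/v1' })
// );
```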
@@ -2,8 +2,9 @@
  * Task finder tests
  */
 
+// Import after mocks are set up - No mocks needed for readComplexityReport anymore
 import { findTaskById } from '../../scripts/modules/utils.js';
-import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';
+import { emptySampleTasks, sampleTasks } from '../fixtures/sample-tasks.js';
 
 describe('Task Finder', () => {
 	describe('findTaskById function', () => {
@@ -55,5 +56,62 @@ describe('Task Finder', () => {
 			expect(result.task).toBeNull();
 			expect(result.originalSubtaskCount).toBeNull();
 		});
+		test('should work correctly when no complexity report is provided', () => {
+			// Pass null as the complexity report
+			const result = findTaskById(sampleTasks.tasks, 2, null);
+
+			expect(result.task).toBeDefined();
+			expect(result.task.id).toBe(2);
+			expect(result.task.complexityScore).toBeUndefined();
+		});
+		test('should work correctly when task has no complexity data in the provided report', () => {
+			// Define a complexity report that doesn't include task 2
+			const complexityReport = {
+				complexityAnalysis: [{ taskId: 999, complexityScore: 5 }]
+			};
+
+			const result = findTaskById(sampleTasks.tasks, 2, complexityReport);
+
+			expect(result.task).toBeDefined();
+			expect(result.task.id).toBe(2);
+			expect(result.task.complexityScore).toBeUndefined();
+		});
+
+		test('should include complexity score when report is provided', () => {
+			// Define the complexity report for this test
+			const complexityReport = {
+				meta: {
+					generatedAt: '2023-01-01T00:00:00.000Z',
+					tasksAnalyzed: 3,
+					thresholdScore: 5
+				},
+				complexityAnalysis: [
+					{
+						taskId: 1,
+						taskTitle: 'Initialize Project',
+						complexityScore: 3,
+						recommendedSubtasks: 2
+					},
+					{
+						taskId: 2,
+						taskTitle: 'Create Core Functionality',
+						complexityScore: 8,
+						recommendedSubtasks: 5
+					},
+					{
+						taskId: 3,
+						taskTitle: 'Implement UI Components',
+						complexityScore: 6,
+						recommendedSubtasks: 4
+					}
+				]
+			};
+
+			const result = findTaskById(sampleTasks.tasks, 2, complexityReport);
+
+			expect(result.task).toBeDefined();
+			expect(result.task.id).toBe(2);
+			expect(result.task.complexityScore).toBe(8);
+		});
 	});
 });
@@ -199,6 +199,12 @@ const testSetTaskStatus = (tasksData, taskIdInput, newStatus) => {
 
 // Simplified version of updateSingleTaskStatus for testing
 const testUpdateSingleTaskStatus = (tasksData, taskIdInput, newStatus) => {
+	if (!isValidTaskStatus(newStatus)) {
+		throw new Error(
+			`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
+		);
+	}
+
 	// Check if it's a subtask (e.g., "1.2")
 	if (taskIdInput.includes('.')) {
 		const [parentId, subtaskId] = taskIdInput
@@ -329,6 +335,10 @@ const testAddTask = (
 import * as taskManager from '../../scripts/modules/task-manager.js';
 import { sampleClaudeResponse } from '../fixtures/sample-claude-response.js';
 import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';
+import {
+	isValidTaskStatus,
+	TASK_STATUS_OPTIONS
+} from '../../src/constants/task-status.js';
 
 // Destructure the required functions for convenience
 const { findNextTask, generateTaskFiles, clearSubtasks, updateTaskById } =
@@ -1165,6 +1175,16 @@ describe('Task Manager Module', () => {
 			expect(testTasksData.tasks[1].status).toBe('done');
 		});
 
+		test('should throw error for invalid status', async () => {
+			// Arrange
+			const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
+
+			// Assert
+			expect(() =>
+				testUpdateSingleTaskStatus(testTasksData, '2', 'Done')
+			).toThrow(/Error: Invalid status value: Done./);
+		});
+
 		test('should update subtask status', async () => {
 			// Arrange
 			const testTasksData = JSON.parse(JSON.stringify(sampleTasks));