Compare commits


32 Commits

Author SHA1 Message Date
Ralph Khreish
3592736451 Merge remote-tracking branch 'origin/next' into fix/set-task-status 2025-05-16 15:43:56 +02:00
Ralph Khreish
ed17cb0e0a feat: implement baseUrls on all AI providers (#521) 2025-05-16 15:34:29 +02:00
shenysun
e5ed10275e fix: update import path 2025-05-16 14:05:34 +08:00
Ralph Khreish
e96734a6cc fix: updateTask enableSilentMode is not defined (#517)
- Closes #412
2025-05-15 22:56:52 +02:00
Ralph Khreish
17294ff259 Fix: Correct version resolution for banner and update check (#511)
* Fix: Correct version resolution for banner and update check

Resolves issues where the tool's version was displayed as 'unknown'.

- Modified 'displayBanner' in 'ui.js' and 'checkForUpdate' in 'commands.js' to read package.json relative to their own script locations using import.meta.url (a short sketch of this approach follows this commit entry).
- This ensures the correct local version is identified for both the main banner display and the update notification mechanism.
- Restored a missing closing brace in 'ui.js' to fix a SyntaxError.

* fix: refactor and cleanup

* fix: chores and cleanup and testing

* chore: cleanup

* fix: add changeset

---------

Co-authored-by: Christer Soederlund <christer.soderlund@gmail.com>
2025-05-15 22:41:16 +02:00
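A minimal sketch of the approach this commit describes, assuming only Node's built-in `fs`, `path`, and `url` modules; the function name and relative path are illustrative, not the repository's actual helper.

```javascript
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';

// Hypothetical sketch: resolve package.json relative to this script's own location
// (via import.meta.url) instead of process.cwd(), so the version is correct even
// when the CLI is run from another directory.
function getOwnPackageVersion() {
	try {
		const scriptDir = path.dirname(fileURLToPath(import.meta.url));
		// The relative hop depends on where this file sits in the repo layout.
		const pkgPath = path.resolve(scriptDir, '../package.json');
		return JSON.parse(fs.readFileSync(pkgPath, 'utf8')).version;
	} catch {
		return 'unknown'; // same fallback the CLI used before the fix
	}
}
```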
shenysun
97bf01a0ac fix: error handling of task status settings 2025-05-15 21:18:30 +08:00
Lars Bell
a96215a359 Update .taskmasterconfig (#435)
* Update .taskmasterconfig

Max tokens for Claude 3.5 Sonnet is lower. With the current value, you get this error:

Service call failed for role fallback (Provider: anthropic, Model: claude-3-5-sonnet-20240620): max_tokens: 120000 > 8192, which is the maximum allowed number of output tokens for claude-3-5-sonnet-20240620

* Fix fallback model ID format and update maxTokens in Taskmaster configuration

---------

Co-authored-by: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com>
2025-05-15 13:01:21 +02:00
Ralph Khreish
0a611843b5 fix: Inline comments in .env.example conflicting with env variable values (#501)
* fix: Update API key format in env.example to use quotes for consistency

* chore: add changelog
2025-05-15 01:32:49 +02:00
Kayvan Sylvan
a1f8d52474 chore: rename log level environment variable to TASKMASTER_LOG_LEVEL (#417)
* chore: rename log level environment variable to `TASKMASTER_LOG_LEVEL`

### CHANGES
- Update environment variable from `LOG_LEVEL` to `TASKMASTER_LOG_LEVEL`.
- Reflect change in documentation for clarity.
- Adjust variable name in script and test files.
- Maintain default log level as `info`.

* fix: add changeset

* chore: rename `LOG_LEVEL` to `TASKMASTER_LOG_LEVEL` for consistency

### CHANGES
- Update environment variable name to `TASKMASTER_LOG_LEVEL` in documentation.
- Reflect rename in configuration rules for clarity.
- Maintain consistency across project configuration settings.
2025-05-15 01:09:41 +02:00
Ralph Khreish
c47deeb869 Merge remote-tracking branch 'origin/main' into next 2025-05-15 00:29:54 +02:00
github-actions[bot]
dd90c9cb5d Version Packages 2025-05-15 00:29:11 +02:00
Ralph Khreish
c7042845d6 chore: improve CI to better accommodate pre-releases for testing (#507) 2025-05-15 00:28:06 +02:00
Joe Danziger
efce37469b Fix duplicate output on CLI help screen (#496)
* remove duplication

* add changeset

* fix formatting
2025-05-14 13:12:15 +02:00
Joe Danziger
4117f71c18 Fix CLI --force flag on parse-prd command 2025-05-13 22:06:09 +02:00
Ralph Khreish
09d839fff5 Merge pull request #405 from eyaltoledano/changeset-release/main
Version Packages
2025-05-03 20:46:10 +02:00
github-actions[bot]
90068348d3 Version Packages 2025-05-03 18:13:24 +00:00
Ralph Khreish
02e347d2d7 Merge pull request #404 from eyaltoledano/next
Release 0.13.2
2025-05-03 20:13:05 +02:00
Ralph Khreish
0527c363e3 Merge remote-tracking branch 'origin/main' into next 2025-05-03 19:32:07 +02:00
Ralph Khreish
735135efe9 chore: allow github actions to commit 2025-05-03 19:24:00 +02:00
Ralph Khreish
4fee667a05 chore: improve pre-release workflow 2025-05-03 19:07:42 +02:00
Ralph Khreish
01963af2cb Fix: issues with 0.13.0 not working (#402)
* Exit prerelease mode and version packages

* hotfix: move production package to "dependencies"

* Enter prerelease mode and version packages

* Enter prerelease mode and version packages

* chore: cleanup

* chore: improve pre.json and add pre-release workflow

* chore: fix package.json

* chore: cleanup
2025-05-03 18:55:18 +02:00
Ralph Khreish
0633895f3b Version Packages (#401)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-05-03 17:02:05 +02:00
github-actions[bot]
10442c1119 Version Packages 2025-05-03 14:56:40 +00:00
Ralph Khreish
734a4fdcfc hotfix: move production package to "dependencies" (#399) 2025-05-03 16:56:17 +02:00
Ralph Khreish
8dace2186c Merge pull request #390 from eyaltoledano/changeset-release/main
Version Packages
2025-05-03 10:17:11 +02:00
github-actions[bot]
095e373843 Version Packages 2025-05-03 08:14:02 +00:00
Ralph Khreish
0bc9bac392 Merge pull request #369 from eyaltoledano/next
Release 0.13.0
2025-05-03 10:13:43 +02:00
Eyal Toledano
0a45f4329c Merge pull request #389 from eyaltoledano/v013-final
fix(config): restores sonnet 3.7 as default main role.
2025-05-03 02:59:44 -04:00
Eyal Toledano
c4b2f7e514 fix(config): restores sonnet 3.7 as default main role. 2025-05-03 02:28:40 -04:00
Eyal Toledano
9684beafc3 Merge pull request #388 from eyaltoledano/readme-init-typo
chore: readme typos
2025-05-03 02:19:49 -04:00
Eyal Toledano
302b916045 chore: readme typos 2025-05-03 02:17:52 -04:00
Eyal Toledano
e7f18f65b9 Merge pull request #387 from eyaltoledano/v0.13-touchups
fix: improve error handling, test options, and model configuration

Final polish for v0.13.x
2025-05-03 02:12:40 -04:00
66 changed files with 554 additions and 376 deletions

View File

@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---
Resolve all issues related to MCP

View File

@@ -1,5 +0,0 @@
---
'task-master-ai': patch
---
- Add support for Google Gemini models via Vercel AI SDK integration.

View File

@@ -1,5 +0,0 @@
---
'task-master-ai': patch
---
Add xAI provider and Grok models support

View File

@@ -1,8 +0,0 @@
---
'task-master-ai': minor
---
feat(expand): Enhance `expand` and `expand-all` commands
- Integrate `task-complexity-report.json` to automatically determine the number of subtasks and use tailored prompts for expansion based on prior analysis. You no longer need to try copy-pasting the recommended prompt. If it exists, it will use it for you. You can just run `task-master update --id=[id of task] --research` and it will use that prompt automatically. No extra prompt needed.
- Change default behavior to *append* new subtasks to existing ones. Use the `--force` flag to clear existing subtasks before expanding. This is helpful if you need to add more subtasks to a task but you want to do it by the batch from a given prompt. Use force if you want to start fresh with a task's subtasks.

View File

@@ -1,9 +0,0 @@
---
'task-master-ai': patch
---
Better support for file paths on Windows, Linux & WSL.
- Standardizes handling of different path formats (URI encoded, Windows, Linux, WSL).
- Ensures tools receive a clean, absolute path suitable for the server OS.
- Simplifies tool implementation by centralizing normalization logic.
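As a very rough illustration of the normalization idea only (assumed names; the real implementation also handles WSL path translation, which is omitted here):

```javascript
import path from 'path';

// Illustrative sketch: accept URI-encoded or Windows-style input and return a
// clean absolute path for the current OS. Not the actual normalizer.
function normalizeIncomingPath(rawPath) {
	let p = decodeURIComponent(rawPath); // handle URI-encoded paths
	p = p.replace(/^file:\/\//, ''); // strip a file:// prefix if present
	p = p.replace(/\\/g, '/'); // unify Windows backslashes
	return path.resolve(p); // resolve to an absolute path
}
```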

View File

@@ -1,7 +0,0 @@
---
'task-master-ai': minor
---
Adds support for the OpenRouter AI provider. Users can now configure models available through OpenRouter (requiring an `OPENROUTER_API_KEY`) via the `task-master models` command, granting access to a wide range of additional LLMs.
- IMPORTANT FYI ABOUT OPENROUTER: Taskmaster relies on AI SDK, which itself relies on tool use. It looks like **free** models sometimes do not include tool use. For example, Gemini 2.5 pro (free) failed via OpenRouter (no tool use) but worked fine on the paid version of the model. Custom model support for Open Router is considered experimental and likely will not be further improved for some time.

View File

@@ -1,8 +0,0 @@
---
'task-master-ai': patch
---
Improved update-subtask
- Now it has context about the parent task details
- It also has context about the subtask before it and the subtask after it (if they exist)
- Not passing all subtasks to stay token efficient
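A rough sketch of the context described above (parent details plus only the neighboring subtasks); the shape and names are illustrative, not the actual implementation.

```javascript
// Illustrative only: assemble a token-lean context object for update-subtask.
function buildSubtaskContext(parentTask, subtaskId) {
	const idx = parentTask.subtasks.findIndex((st) => st.id === subtaskId);
	return {
		parent: {
			id: parentTask.id,
			title: parentTask.title,
			details: parentTask.details
		},
		previousSubtask: idx > 0 ? parentTask.subtasks[idx - 1] : null,
		nextSubtask:
			idx >= 0 && idx < parentTask.subtasks.length - 1
				? parentTask.subtasks[idx + 1]
				: null
		// The full subtask list is deliberately left out to stay token efficient.
	};
}
```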

View File

@@ -1,13 +0,0 @@
---
'task-master-ai': patch
---
Improve and adjust `init` command for robustness and updated dependencies.
- **Update Initialization Dependencies:** Ensure newly initialized projects (`task-master init`) include all required AI SDK dependencies (`@ai-sdk/*`, `ai`, provider wrappers) in their `package.json` for out-of-the-box AI feature compatibility. Remove unnecessary dependencies (e.g., `uuid`) from the init template.
- **Silence `npm install` during `init`:** Prevent `npm install` output from interfering with non-interactive/MCP initialization by suppressing its stdio in silent mode.
- **Improve Conditional Model Setup:** Reliably skip interactive `models --setup` during non-interactive `init` runs (e.g., `init -y` or MCP) by checking `isSilentMode()` instead of passing flags.
- **Refactor `init.js`:** Remove internal `isInteractive` flag logic.
- **Update `init` Instructions:** Tweak the "Getting Started" text displayed after `init`.
- **Fix MCP Server Launch:** Update `.cursor/mcp.json` template to use `node ./mcp-server/server.js` instead of `npx task-master-mcp`.
- **Update Default Model:** Change the default main model in the `.taskmasterconfig` template.

View File

@@ -0,0 +1,9 @@
---
'task-master-ai': patch
---
Fix CLI --force flag for parse-prd command
Previously, the --force flag was not respected when running `parse-prd`, causing the command to prompt for confirmation or fail even when --force was provided. This patch ensures that the flag is correctly passed and handled, allowing users to overwrite existing tasks.json files as intended.
- Fixes #477

View File

@@ -0,0 +1,5 @@
---
'task-master-ai': minor
---
.taskmasterconfig now supports a baseUrl field per model role (main, research, fallback), allowing endpoint overrides for any provider.
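For illustration, a role entry with the new field might look like the following (shown as a JS object; the provider, model, and URL values are placeholders taken from the docs elsewhere in this diff):

```javascript
// Hypothetical .taskmasterconfig "main" role with an endpoint override.
const mainRole = {
	provider: 'anthropic',
	modelId: 'claude-3-7-sonnet-20250219',
	maxTokens: 64000,
	temperature: 0.2,
	baseUrl: 'https://api.anthropic.com/v1' // optional; omit to use the provider's standard endpoint
};
```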

View File

@@ -1,5 +0,0 @@
---
'task-master-ai': patch
---
Fixes an issue with add-task which did not use the manually defined properties and still needlessly hit the AI endpoint.

View File

@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---
Task Master no longer tells you to update when you're already up to date

View File

@@ -1,5 +0,0 @@
---
'task-master-ai': minor
---
Adds model management and a new configuration file, .taskmasterconfig, which houses the models used for main, research, and fallback. Adds a models command and setter flags. Adds a --setup flag with an interactive setup; we should be calling this during init. Shows a table of active and available models when models is called without flags. Includes SWE scores and token costs, which are manually entered into supported_models.json, the new place where supported models are defined. Config-manager.js is the core module responsible for managing the new config.

View File

@@ -1,5 +0,0 @@
---
'task-master-ai': patch
---
Fixes an issue that prevented remove-subtask with comma separated tasks/subtasks from being deleted (only the first ID was being deleted). Closes #140

View File

@@ -1,10 +0,0 @@
---
'task-master-ai': patch
---
Improves next command to be subtask-aware
- The logic for determining the "next task" (findNextTask function, used by task-master next and the next_task MCP tool) has been significantly improved. Previously, it only considered top-level tasks, making its recommendation less useful when a parent task containing subtasks was already marked 'in-progress'.
- The updated logic now prioritizes finding the next available subtask within any 'in-progress' parent task, considering subtask dependencies and priority.
- If no suitable subtask is found within active parent tasks, it falls back to recommending the next eligible top-level task based on the original criteria (status, dependencies, priority).
This change makes the next command much more relevant and helpful during the implementation phase of complex tasks.
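A simplified sketch of that prioritization, assuming tasks carry `status`, `dependencies`, and optional `subtasks`; the real findNextTask applies more detailed priority ordering.

```javascript
// Simplified, illustrative version of subtask-aware "next task" selection.
function findNextTaskSketch(tasks) {
	const doneIds = new Set(
		tasks.filter((t) => t.status === 'done').map((t) => t.id)
	);
	const isEligible = (item) =>
		item.status === 'pending' &&
		(item.dependencies || []).every((dep) => doneIds.has(dep));

	// 1. Prefer an eligible subtask inside any in-progress parent task.
	for (const task of tasks) {
		if (task.status === 'in-progress' && Array.isArray(task.subtasks)) {
			const subtask = task.subtasks.find(isEligible);
			if (subtask) return { ...subtask, parentId: task.id };
		}
	}
	// 2. Otherwise fall back to the next eligible top-level task.
	return tasks.find(isEligible) || null;
}
```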

View File

@@ -1,11 +0,0 @@
---
'task-master-ai': minor
---
Adds custom model ID support for Ollama and OpenRouter providers.
- Adds the `--ollama` and `--openrouter` flags to the `task-master models --set-<role>` command to set models for those providers outside of the supported models list.
- Updated `task-master models --setup` interactive mode with options to explicitly enter custom Ollama or OpenRouter model IDs.
- Implemented live validation against OpenRouter API (`/api/v1/models`) when setting a custom OpenRouter model ID (via flag or setup).
- Refined logic to prioritize explicit provider flags/choices over internal model list lookups in case of ID conflicts.
- Added warnings when setting custom/unvalidated models.
- We obviously don't recommend going with a custom, unproven model. If you do and find performance is good, please let us know so we can add it to the list of supported models.
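A hedged sketch of that live validation step, assuming OpenRouter's model listing at `/api/v1/models` returns a `{ data: [{ id, ... }] }` payload; the actual request and error handling in Taskmaster may differ.

```javascript
// Illustrative check: does a custom model ID appear in OpenRouter's model list?
async function isValidOpenRouterModelId(modelId) {
	const response = await fetch('https://openrouter.ai/api/v1/models');
	if (!response.ok) return false; // treat a failed request as "could not validate"
	const { data } = await response.json();
	return Array.isArray(data) && data.some((model) => model.id === modelId);
}
```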

View File

@@ -1,5 +0,0 @@
---
'task-master-ai': patch
---
Add `--status` flag to `show` command to filter displayed subtasks.

12
.changeset/pre.json Normal file
View File

@@ -0,0 +1,12 @@
{
"mode": "exit",
"tag": "rc",
"initialVersions": {
"task-master-ai": "0.13.2"
},
"changesets": [
"beige-doodles-type",
"red-oranges-attend",
"red-suns-wash"
]
}

View File

@@ -1,7 +0,0 @@
---
'task-master-ai': minor
---
Integrate OpenAI as a new AI provider.
- Enhance `models` command/tool to display API key status.
- Implement model-specific `maxTokens` override based on `supported-models.json` to save you if you use an incorrect max token value.
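The override could plausibly be a simple clamp against the per-model limit recorded in `supported-models.json`; a hypothetical sketch (names assumed):

```javascript
// Illustrative clamp: never request more output tokens than the model allows.
function resolveMaxTokens(requestedMaxTokens, supportedModelEntry) {
	const modelLimit = supportedModelEntry?.maxTokens;
	return typeof modelLimit === 'number'
		? Math.min(requestedMaxTokens, modelLimit)
		: requestedMaxTokens;
}
```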

View File

@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---
Fix ERR_MODULE_NOT_FOUND when trying to run MCP Server

View File

@@ -2,4 +2,4 @@
'task-master-ai': patch
---
Add integration for Roo Code
Add src directory to exports

View File

@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---
Fix the error handling of task status settings

View File

@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---
Fix for issue #409 LOG_LEVEL Pydantic validation error

View File

@@ -0,0 +1,7 @@
---
'task-master-ai': patch
---
Fix initial .env.example to work out of the box
- Closes #419

View File

@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---
Fix default fallback model and maxTokens in Taskmaster initialization

View File

@@ -1,9 +0,0 @@
---
'task-master-ai': minor
---
Tweaks Perplexity AI calls for research mode to max out input tokens and get day-fresh information
- Forces temp at 0.1 for highly deterministic output, no variations
- Adds a system prompt to further improve the output
- Correctly uses the maximum input tokens (8,719 available, 8,700 used) for Perplexity
- Specifies to use a high degree of research across the web
- Specifies to use information that is as fresh as today; this supports things like capturing brand-new announcements (e.g., new GPT models) and being able to query for them in research. 🔥

View File

@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---
Fix bug when updating tasks on the MCP server (#412)

View File

@@ -1,5 +0,0 @@
---
'task-master-ai': patch
---
Fix --task to --num-tasks in ui + related tests - issue #324

View File

@@ -1,9 +0,0 @@
---
'task-master-ai': patch
---
Adds a 'models' CLI and MCP command to get the current model configuration and available models, and to set the main/research/fallback models.
- In the CLI, `task-master models` shows the current models config. Using the `--setup` flag launches an interactive setup that allows you to easily select the models you want to use for each of the three roles. Use `q` during the interactive setup to cancel the setup.
- In the MCP, responses are simplified in RESTful format (instead of the full CLI output). The agent can use the `models` tool with different arguments, including `listAvailableModels` to get available models. Run without arguments, it will return the current configuration. Arguments are available to set the model for each of the three roles. This allows you to manage Taskmaster AI providers and models directly from either the CLI or MCP or both.
- Updated the CLI help menu when you run `task-master` to include missing commands and .taskmasterconfig information.
- Adds `--research` flag to `add-task` so you can hit up Perplexity right from the add-task flow, rather than having to add a task and then update it.

View File

@@ -0,0 +1,11 @@
---
'task-master-ai': patch
---
Fix duplicate output on CLI help screen
- Prevent the Task Master CLI from printing the help screen more than once when using `-h` or `--help`.
- Removed redundant manual event handlers and guards for help output; now only the Commander `.helpInformation` override is used for custom help.
- Simplified logic so that help is only shown once for both "no arguments" and help flag flows.
- Ensures a clean, branded help experience with no repeated content.
- Fixes #339
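A minimal sketch of the approach this fix takes (visible in the commands.js hunk further down): a single `helpInformation` override on the Commander program, with the extra `--help`/`-h` event handlers removed. `displayHelp` stands in for the CLI's branded help renderer.

```javascript
import { Command } from 'commander';

// Stand-in for the real branded help renderer.
const displayHelp = () => console.log('Task Master: commands, flags, and configuration ...');

const program = new Command()
	.helpOption('-h, --help', 'Display help')
	.addHelpCommand(false); // disable the default "help" subcommand

// Single override: Commander calls this for -h/--help, so help prints exactly once.
program.helpInformation = () => {
	displayHelp();
	return ''; // return an empty string so Commander adds nothing of its own
};
```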

View File

@@ -116,7 +116,7 @@ Taskmaster configuration is managed through two main mechanisms:
* For MCP/Cursor integration, configure these keys in the `env` section of `.cursor/mcp.json`.
* Available keys/variables: See `assets/env.example` or the Configuration section in the command reference (previously linked to `taskmaster.mdc`).
**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool.
**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `TASKMASTER_LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool.
**If AI commands FAIL in MCP** verify that the API key for the selected provider is present in the `env` section of `.cursor/mcp.json`.
**If AI commands FAIL in CLI** verify that the API key for the selected provider is present in the `.env` file in the root of the project.

62
.github/workflows/pre-release.yml vendored Normal file
View File

@@ -0,0 +1,62 @@
name: Pre-Release (RC)
on:
workflow_dispatch: # Allows manual triggering from GitHub UI/API
concurrency: pre-release-${{ github.ref }}
jobs:
rc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-node@v4
with:
node-version: 20
cache: 'npm'
- name: Cache node_modules
uses: actions/cache@v4
with:
path: |
node_modules
*/*/node_modules
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Install dependencies
run: npm ci
timeout-minutes: 2
- name: Enter RC mode
run: |
npx changeset pre exit || true
npx changeset pre enter rc
- name: Version RC packages
run: npx changeset version
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Create Release Candidate Pull Request or Publish Release Candidate to npm
uses: changesets/action@v1
with:
publish: npm run release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Exit RC mode
run: npx changeset pre exit
- name: Commit & Push changes
uses: actions-js/push@master
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
branch: ${{ github.ref }}
message: 'chore: rc version bump'

View File

@@ -33,6 +33,9 @@ jobs:
run: npm ci
timeout-minutes: 2
- name: Exit pre-release mode (safety check)
run: npx changeset pre exit || true
- name: Create Release Pull Request or Publish to npm
uses: changesets/action@v1
with:

3
.gitignore vendored
View File

@@ -61,3 +61,6 @@ dist
*.debug
init-debug.log
dev-debug.log
# NPMRC
.npmrc

View File

@@ -1,8 +1,8 @@
{
"models": {
"main": {
"provider": "google",
"modelId": "gemini-2.0-flash",
"provider": "anthropic",
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 100000,
"temperature": 0.2
},

View File

@@ -1,5 +1,77 @@
# task-master-ai
## 0.13.1
### Patch Changes
- [#399](https://github.com/eyaltoledano/claude-task-master/pull/399) [`734a4fd`](https://github.com/eyaltoledano/claude-task-master/commit/734a4fdcfc89c2e089255618cf940561ad13a3c8) Thanks [@Crunchyman-ralph](https://github.com/Crunchyman-ralph)! - Fix ERR_MODULE_NOT_FOUND when trying to run MCP Server
## 0.13.0
### Minor Changes
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`ef782ff`](https://github.com/eyaltoledano/claude-task-master/commit/ef782ff5bd4ceb3ed0dc9ea82087aae5f79ac933) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - feat(expand): Enhance `expand` and `expand-all` commands
- Integrate `task-complexity-report.json` to automatically determine the number of subtasks and use tailored prompts for expansion based on prior analysis. You no longer need to try copy-pasting the recommended prompt. If it exists, it will use it for you. You can just run `task-master update --id=[id of task] --research` and it will use that prompt automatically. No extra prompt needed.
- Change default behavior to _append_ new subtasks to existing ones. Use the `--force` flag to clear existing subtasks before expanding. This is helpful if you need to add more subtasks to a task but you want to do it by the batch from a given prompt. Use force if you want to start fresh with a task's subtasks.
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`87d97bb`](https://github.com/eyaltoledano/claude-task-master/commit/87d97bba00d84e905756d46ef96b2d5b984e0f38) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Adds support for the OpenRouter AI provider. Users can now configure models available through OpenRouter (requiring an `OPENROUTER_API_KEY`) via the `task-master models` command, granting access to a wide range of additional LLMs. - IMPORTANT FYI ABOUT OPENROUTER: Taskmaster relies on AI SDK, which itself relies on tool use. It looks like **free** models sometimes do not include tool use. For example, Gemini 2.5 pro (free) failed via OpenRouter (no tool use) but worked fine on the paid version of the model. Custom model support for Open Router is considered experimental and likely will not be further improved for some time.
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`1ab836f`](https://github.com/eyaltoledano/claude-task-master/commit/1ab836f191cb8969153593a9a0bd47fc9aa4a831) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Adds model management and a new configuration file, .taskmasterconfig, which houses the models used for main, research, and fallback. Adds a models command and setter flags. Adds a --setup flag with an interactive setup; we should be calling this during init. Shows a table of active and available models when models is called without flags. Includes SWE scores and token costs, which are manually entered into supported_models.json, the new place where supported models are defined. Config-manager.js is the core module responsible for managing the new config.
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`c8722b0`](https://github.com/eyaltoledano/claude-task-master/commit/c8722b0a7a443a73b95d1bcd4a0b68e0fce2a1cd) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Adds custom model ID support for Ollama and OpenRouter providers.
- Adds the `--ollama` and `--openrouter` flags to the `task-master models --set-<role>` command to set models for those providers outside of the supported models list.
- Updated `task-master models --setup` interactive mode with options to explicitly enter custom Ollama or OpenRouter model IDs.
- Implemented live validation against OpenRouter API (`/api/v1/models`) when setting a custom OpenRouter model ID (via flag or setup).
- Refined logic to prioritize explicit provider flags/choices over internal model list lookups in case of ID conflicts.
- Added warnings when setting custom/unvalidated models.
- We obviously don't recommend going with a custom, unproven model. If you do and find performance is good, please let us know so we can add it to the list of supported models.
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`2517bc1`](https://github.com/eyaltoledano/claude-task-master/commit/2517bc112c9a497110f3286ca4bfb4130c9addcb) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Integrate OpenAI as a new AI provider. - Enhance `models` command/tool to display API key status. - Implement model-specific `maxTokens` override based on `supported-models.json` to save you if you use an incorrect max token value.
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`9a48278`](https://github.com/eyaltoledano/claude-task-master/commit/9a482789f7894f57f655fb8d30ba68542bd0df63) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Tweaks Perplexity AI calls for research mode to max out input tokens and get day-fresh information - Forces temp at 0.1 for highly deterministic output, no variations - Adds a system prompt to further improve the output - Correctly uses the maximum input tokens (8,719 available, 8,700 used) for Perplexity - Specifies to use a high degree of research across the web - Specifies to use information that is as fresh as today; this supports things like capturing brand-new announcements (e.g., new GPT models) and being able to query for them in research. 🔥
### Patch Changes
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`842eaf7`](https://github.com/eyaltoledano/claude-task-master/commit/842eaf722498ddf7307800b4cdcef4ac4fd7e5b0) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - - Add support for Google Gemini models via Vercel AI SDK integration.
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`ed79d4f`](https://github.com/eyaltoledano/claude-task-master/commit/ed79d4f4735dfab4124fa189214c0bd5e23a6860) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Add xAI provider and Grok models support
- [#378](https://github.com/eyaltoledano/claude-task-master/pull/378) [`ad89253`](https://github.com/eyaltoledano/claude-task-master/commit/ad89253e313a395637aa48b9f92cc39b1ef94ad8) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Better support for file paths on Windows, Linux & WSL.
- Standardizes handling of different path formats (URI encoded, Windows, Linux, WSL).
- Ensures tools receive a clean, absolute path suitable for the server OS.
- Simplifies tool implementation by centralizing normalization logic.
- [#285](https://github.com/eyaltoledano/claude-task-master/pull/285) [`2acba94`](https://github.com/eyaltoledano/claude-task-master/commit/2acba945c0afee9460d8af18814c87e80f747e9f) Thanks [@neno-is-ooo](https://github.com/neno-is-ooo)! - Add integration for Roo Code
- [#378](https://github.com/eyaltoledano/claude-task-master/pull/378) [`d63964a`](https://github.com/eyaltoledano/claude-task-master/commit/d63964a10eed9be17856757661ff817ad6bacfdc) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Improved update-subtask - Now it has context about the parent task details - It also has context about the subtask before it and the subtask after it (if they exist) - Not passing all subtasks to stay token efficient
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`5f504fa`](https://github.com/eyaltoledano/claude-task-master/commit/5f504fafb8bdaa0043c2d20dee8bbb8ec2040d85) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Improve and adjust `init` command for robustness and updated dependencies.
- **Update Initialization Dependencies:** Ensure newly initialized projects (`task-master init`) include all required AI SDK dependencies (`@ai-sdk/*`, `ai`, provider wrappers) in their `package.json` for out-of-the-box AI feature compatibility. Remove unnecessary dependencies (e.g., `uuid`) from the init template.
- **Silence `npm install` during `init`:** Prevent `npm install` output from interfering with non-interactive/MCP initialization by suppressing its stdio in silent mode.
- **Improve Conditional Model Setup:** Reliably skip interactive `models --setup` during non-interactive `init` runs (e.g., `init -y` or MCP) by checking `isSilentMode()` instead of passing flags.
- **Refactor `init.js`:** Remove internal `isInteractive` flag logic.
- **Update `init` Instructions:** Tweak the "Getting Started" text displayed after `init`.
- **Fix MCP Server Launch:** Update `.cursor/mcp.json` template to use `node ./mcp-server/server.js` instead of `npx task-master-mcp`.
- **Update Default Model:** Change the default main model in the `.taskmasterconfig` template.
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`96aeeff`](https://github.com/eyaltoledano/claude-task-master/commit/96aeeffc195372722c6a07370540e235bfe0e4d8) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Fixes an issue with add-task which did not use the manually defined properties and still needlessly hit the AI endpoint.
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`5aea93d`](https://github.com/eyaltoledano/claude-task-master/commit/5aea93d4c0490c242d7d7042a210611977848e0a) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Fixes an issue that prevented remove-subtask with comma separated tasks/subtasks from being deleted (only the first ID was being deleted). Closes #140
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`66ac9ab`](https://github.com/eyaltoledano/claude-task-master/commit/66ac9ab9f66d006da518d6e8a3244e708af2764d) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Improves next command to be subtask-aware - The logic for determining the "next task" (findNextTask function, used by task-master next and the next_task MCP tool) has been significantly improved. Previously, it only considered top-level tasks, making its recommendation less useful when a parent task containing subtasks was already marked 'in-progress'. - The updated logic now prioritizes finding the next available subtask within any 'in-progress' parent task, considering subtask dependencies and priority. - If no suitable subtask is found within active parent tasks, it falls back to recommending the next eligible top-level task based on the original criteria (status, dependencies, priority).
This change makes the next command much more relevant and helpful during the implementation phase of complex tasks.
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`ca7b045`](https://github.com/eyaltoledano/claude-task-master/commit/ca7b0457f1dc65fd9484e92527d9fd6d69db758d) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Add `--status` flag to `show` command to filter displayed subtasks.
- [#328](https://github.com/eyaltoledano/claude-task-master/pull/328) [`5a2371b`](https://github.com/eyaltoledano/claude-task-master/commit/5a2371b7cc0c76f5e95d43921c1e8cc8081bf14e) Thanks [@knoxgraeme](https://github.com/knoxgraeme)! - Fix --task to --num-tasks in ui + related tests - issue #324
- [#240](https://github.com/eyaltoledano/claude-task-master/pull/240) [`6cb213e`](https://github.com/eyaltoledano/claude-task-master/commit/6cb213ebbd51116ae0688e35b575d09443d17c3b) Thanks [@eyaltoledano](https://github.com/eyaltoledano)! - Adds a 'models' CLI and MCP command to get the current model configuration and available models, and to set the main/research/fallback models. - In the CLI, `task-master models` shows the current models config. Using the `--setup` flag launches an interactive setup that allows you to easily select the models you want to use for each of the three roles. Use `q` during the interactive setup to cancel the setup. - In the MCP, responses are simplified in RESTful format (instead of the full CLI output). The agent can use the `models` tool with different arguments, including `listAvailableModels` to get available models. Run without arguments, it will return the current configuration. Arguments are available to set the model for each of the three roles. This allows you to manage Taskmaster AI providers and models directly from either the CLI or MCP or both. - Updated the CLI help menu when you run `task-master` to include missing commands and .taskmasterconfig information. - Adds `--research` flag to `add-task` so you can hit up Perplexity right from the add-task flow, rather than having to add a task and then update it.
## 0.12.1
### Patch Changes

View File

@@ -47,7 +47,7 @@ npm install task-master-ai
task-master init
# If installed locally
npx task-master-init
npx task-master init
```
This will prompt you for project details and set up a new project with the necessary files and structure.

View File

@@ -14,8 +14,8 @@
},
"fallback": {
"provider": "anthropic",
"modelId": "claude-3.5-sonnet-20240620",
"maxTokens": 120000,
"modelId": "claude-3-5-sonnet-20240620",
"maxTokens": 8192,
"temperature": 0.1
}
},

View File

@@ -198,7 +198,7 @@ alwaysApply: true
- **MAX_TOKENS** (Default: `"4000"`): Maximum tokens for responses (Example: `MAX_TOKENS=8000`)
- **TEMPERATURE** (Default: `"0.7"`): Temperature for model responses (Example: `TEMPERATURE=0.5`)
- **DEBUG** (Default: `"false"`): Enable debug logging (Example: `DEBUG=true`)
- **LOG_LEVEL** (Default: `"info"`): Console output level (Example: `LOG_LEVEL=debug`)
- **TASKMASTER_LOG_LEVEL** (Default: `"info"`): Console output level (Example: `TASKMASTER_LOG_LEVEL=debug`)
- **DEFAULT_SUBTASKS** (Default: `"3"`): Default subtask count (Example: `DEFAULT_SUBTASKS=5`)
- **DEFAULT_PRIORITY** (Default: `"medium"`): Default priority (Example: `DEFAULT_PRIORITY=high`)
- **PROJECT_NAME** (Default: `"MCP SaaS MVP"`): Project name in metadata (Example: `PROJECT_NAME=My Awesome Project`)

View File

@@ -1,8 +1,8 @@
# API Keys (Required to enable respective provider)
ANTHROPIC_API_KEY=your_anthropic_api_key_here # Required: Format: sk-ant-api03-...
PERPLEXITY_API_KEY=your_perplexity_api_key_here # Optional: Format: pplx-...
OPENAI_API_KEY=your_openai_api_key_here # Optional, for OpenAI/OpenRouter models. Format: sk-proj-...
GOOGLE_API_KEY=your_google_api_key_here # Optional, for Google Gemini models.
MISTRAL_API_KEY=your_mistral_key_here # Optional, for Mistral AI models.
XAI_API_KEY=YOUR_XAI_KEY_HERE # Optional, for xAI AI models.
AZURE_OPENAI_API_KEY=your_azure_key_here # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
ANTHROPIC_API_KEY="your_anthropic_api_key_here" # Required: Format: sk-ant-api03-...
PERPLEXITY_API_KEY="your_perplexity_api_key_here" # Optional: Format: pplx-...
OPENAI_API_KEY="your_openai_api_key_here" # Optional, for OpenAI/OpenRouter models. Format: sk-proj-...
GOOGLE_API_KEY="your_google_api_key_here" # Optional, for Google Gemini models.
MISTRAL_API_KEY="your_mistral_key_here" # Optional, for Mistral AI models.
XAI_API_KEY="YOUR_XAI_KEY_HERE" # Optional, for xAI AI models.
AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).

View File

@@ -31,7 +31,7 @@ Task Master configuration is now managed through two primary methods:
- Create a `.env` file in your project root for CLI usage.
- See `assets/env.example` for required key names.
**Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead.
**Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `TASKMASTER_LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead.
## How It Works
@@ -200,7 +200,7 @@ Notes:
## Logging
The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:
The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable:
- `debug`: Detailed information, typically useful for troubleshooting
- `info`: Confirmation that things are working as expected (default)

View File

@@ -15,13 +15,15 @@ Taskmaster uses two primary methods for configuration:
"provider": "anthropic",
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 64000,
"temperature": 0.2
"temperature": 0.2,
"baseUrl": "https://api.anthropic.com/v1"
},
"research": {
"provider": "perplexity",
"modelId": "sonar-pro",
"maxTokens": 8700,
"temperature": 0.1
"temperature": 0.1,
"baseUrl": "https://api.perplexity.ai/v1"
},
"fallback": {
"provider": "anthropic",
@@ -56,8 +58,9 @@ Taskmaster uses two primary methods for configuration:
- `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`).
- `OPENROUTER_API_KEY`: Your OpenRouter API key.
- `XAI_API_KEY`: Your X-AI API key.
- **Optional Endpoint Overrides (in .taskmasterconfig):**
- `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key.
- **Optional Endpoint Overrides:**
- **Per-role `baseUrl` in `.taskmasterconfig`:** You can add a `baseUrl` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
- `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseUrl` for the Azure model role).
- `OLLAMA_BASE_URL`: Override the default Ollama API URL (Default: `http://localhost:11434/api`).
**Important:** Settings like model ID selections (`main`, `research`, `fallback`), `maxTokens`, `temperature`, `logLevel`, `defaultSubtasks`, `defaultPriority`, and `projectName` are **managed in `.taskmasterconfig`**, not environment variables.

View File

@@ -89,7 +89,7 @@ Initialize a new project:
task-master init
# If installed locally
npx task-master-init
npx task-master init
```
This will prompt you for project details and set up a new project with the necessary files and structure.

View File

@@ -6,6 +6,10 @@
import path from 'path';
import { updateTasks } from '../../../../scripts/modules/task-manager.js';
import { createLogWrapper } from '../../tools/utils.js';
import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
/**
* Direct function wrapper for updating tasks based on new context.

View File

@@ -11,6 +11,7 @@ import {
} from './utils.js';
import { setTaskStatusDirect } from '../core/task-master-core.js';
import { findTasksJsonPath } from '../core/utils/path-utils.js';
import { TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js';
/**
* Register the setTaskStatus tool with the MCP server
@@ -27,7 +28,7 @@ export function registerSetTaskStatusTool(server) {
"Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated to update multiple tasks/subtasks at once."
),
status: z
.string()
.enum(TASK_STATUS_OPTIONS)
.describe(
"New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'."
),
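The `TASK_STATUS_OPTIONS` import comes from `src/constants/task-status.js`, which is not included in this view; based on the statuses listed in the option description, a hypothetical reconstruction of that module looks like this:

```javascript
// Hypothetical reconstruction of src/constants/task-status.js (file not shown in this diff).
export const TASK_STATUS_OPTIONS = [
	'pending',
	'done',
	'in-progress',
	'review',
	'deferred',
	'cancelled'
];

export function isValidTaskStatus(status) {
	return TASK_STATUS_OPTIONS.includes(status);
}
```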

34
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "task-master-ai",
"version": "0.12.1",
"version": "0.13.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "task-master-ai",
"version": "0.12.1",
"version": "0.13.2",
"license": "MIT WITH Commons-Clause",
"dependencies": {
"@ai-sdk/anthropic": "^1.2.10",
@@ -19,6 +19,9 @@
"@anthropic-ai/sdk": "^0.39.0",
"@openrouter/ai-sdk-provider": "^0.4.5",
"ai": "^4.3.10",
"boxen": "^8.0.1",
"chalk": "^5.4.1",
"cli-table3": "^0.6.5",
"commander": "^11.1.0",
"cors": "^2.8.5",
"dotenv": "^16.3.1",
@@ -34,7 +37,8 @@
"ollama-ai-provider": "^1.2.0",
"openai": "^4.89.0",
"ora": "^8.2.0",
"uuid": "^11.1.0"
"uuid": "^11.1.0",
"zod": "^3.23.8"
},
"bin": {
"task-master": "bin/task-master.js",
@@ -45,9 +49,6 @@
"@changesets/changelog-github": "^0.5.1",
"@changesets/cli": "^2.28.1",
"@types/jest": "^29.5.14",
"boxen": "^8.0.1",
"chalk": "^5.4.1",
"cli-table3": "^0.6.5",
"execa": "^8.0.1",
"ink": "^5.0.1",
"jest": "^29.7.0",
@@ -57,8 +58,7 @@
"prettier": "^3.5.3",
"react": "^18.3.1",
"supertest": "^7.1.0",
"tsx": "^4.16.2",
"zod": "^3.23.8"
"tsx": "^4.16.2"
},
"engines": {
"node": ">=14.0.0"
@@ -1238,7 +1238,6 @@
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz",
"integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==",
"dev": true,
"license": "MIT",
"optional": true,
"engines": {
@@ -3307,7 +3306,6 @@
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz",
"integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==",
"dev": true,
"license": "ISC",
"dependencies": {
"string-width": "^4.1.0"
@@ -3317,7 +3315,6 @@
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
@@ -3327,14 +3324,12 @@
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true,
"license": "MIT"
},
"node_modules/ansi-align/node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dev": true,
"license": "MIT",
"dependencies": {
"emoji-regex": "^8.0.0",
@@ -3349,7 +3344,6 @@
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-regex": "^5.0.1"
@@ -3699,7 +3693,6 @@
"version": "8.0.1",
"resolved": "https://registry.npmjs.org/boxen/-/boxen-8.0.1.tgz",
"integrity": "sha512-F3PH5k5juxom4xktynS7MoFY+NUWH5LC4CnH11YB8NPew+HLpmBLCybSAEyb2F+4pRXhuhWqFesoQd6DAyc2hw==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-align": "^3.0.1",
@@ -3850,7 +3843,6 @@
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/camelcase/-/camelcase-8.0.0.tgz",
"integrity": "sha512-8WB3Jcas3swSvjIeA2yvCJ+Miyz5l1ZmB6HFb9R1317dt9LCQoswg/BGrmAmkWVEszSrrg4RwmO46qIm2OEnSA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=16"
@@ -3935,7 +3927,6 @@
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz",
"integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
@@ -3975,7 +3966,6 @@
"version": "0.6.5",
"resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.5.tgz",
"integrity": "sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"string-width": "^4.2.0"
@@ -3991,7 +3981,6 @@
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
@@ -4001,14 +3990,12 @@
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true,
"license": "MIT"
},
"node_modules/cli-table3/node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dev": true,
"license": "MIT",
"dependencies": {
"emoji-regex": "^8.0.0",
@@ -4023,7 +4010,6 @@
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-regex": "^5.0.1"
@@ -9488,7 +9474,6 @@
"version": "4.37.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.37.0.tgz",
"integrity": "sha512-S/5/0kFftkq27FPNye0XM1e2NsnoD/3FS+pBmbjmmtLT6I+i344KoOf7pvXreaFsDamWeaJX55nczA1m5PsBDg==",
"dev": true,
"license": "(MIT OR CC0-1.0)",
"engines": {
"node": ">=16"
@@ -9698,7 +9683,6 @@
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/widest-line/-/widest-line-5.0.0.tgz",
"integrity": "sha512-c9bZp7b5YtRj2wOe6dlj32MK+Bx/M/d+9VB2SHM1OtsUHR0aV0tdP6DWh/iMt0kWi1t5g1Iudu6hQRNd1A4PVA==",
"dev": true,
"license": "MIT",
"dependencies": {
"string-width": "^7.0.0"
@@ -9714,7 +9698,6 @@
"version": "9.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.0.tgz",
"integrity": "sha512-G8ura3S+3Z2G+mkgNRq8dqaFZAuxfsxpBB8OCTGRTCtp+l/v9nbFNmCUP1BZMts3G1142MsZfn6eeUKrr4PD1Q==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-styles": "^6.2.1",
@@ -9732,7 +9715,6 @@
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
"integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"

View File

@@ -1,6 +1,6 @@
{
"name": "task-master-ai",
"version": "0.12.1",
"version": "0.13.2",
"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
"main": "index.js",
"type": "module",
@@ -64,7 +64,11 @@
"ollama-ai-provider": "^1.2.0",
"openai": "^4.89.0",
"ora": "^8.2.0",
"uuid": "^11.1.0"
"uuid": "^11.1.0",
"boxen": "^8.0.1",
"chalk": "^5.4.1",
"cli-table3": "^0.6.5",
"zod": "^3.23.8"
},
"engines": {
"node": ">=14.0.0"
@@ -78,15 +82,14 @@
"url": "https://github.com/eyaltoledano/claude-task-master/issues"
},
"files": [
"scripts/init.js",
"scripts/dev.js",
"scripts/modules/**",
"scripts/**",
"assets/**",
".cursor/**",
"README-task-master.md",
"index.js",
"bin/**",
"mcp-server/**"
"mcp-server/**",
"src/**"
],
"overrides": {
"node-fetch": "^3.3.2",
@@ -96,9 +99,6 @@
"@changesets/changelog-github": "^0.5.1",
"@changesets/cli": "^2.28.1",
"@types/jest": "^29.5.14",
"boxen": "^8.0.1",
"chalk": "^5.4.1",
"cli-table3": "^0.6.5",
"execa": "^8.0.1",
"ink": "^5.0.1",
"jest": "^29.7.0",
@@ -108,7 +108,6 @@
"prettier": "^3.5.3",
"react": "^18.3.1",
"supertest": "^7.1.0",
"tsx": "^4.16.2",
"zod": "^3.23.8"
"tsx": "^4.16.2"
}
}

View File

@@ -32,7 +32,7 @@ The script can be configured through environment variables in a `.env` file at t
- `PERPLEXITY_API_KEY`: Your Perplexity API key for research-backed subtask generation
- `PERPLEXITY_MODEL`: Specify which Perplexity model to use (default: "sonar-medium-online")
- `DEBUG`: Enable debug logging (default: false)
- `LOG_LEVEL`: Log level - debug, info, warn, error (default: info)
- `TASKMASTER_LOG_LEVEL`: Log level - debug, info, warn, error (default: info)
- `DEFAULT_SUBTASKS`: Default number of subtasks when expanding (default: 3)
- `DEFAULT_PRIORITY`: Default priority for generated tasks (default: medium)
- `PROJECT_NAME`: Override default project name in tasks.json
@@ -225,7 +225,7 @@ To use the Perplexity integration:
## Logging
The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:
The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable:
- `debug`: Detailed information, typically useful for troubleshooting
- `info`: Confirmation that things are working as expected (default)

View File

@@ -38,10 +38,10 @@ const LOG_LEVELS = {
success: 4
};
// Get log level from environment or default to info
const LOG_LEVEL = process.env.LOG_LEVEL
? LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()]
: LOG_LEVELS.info;
// Determine log level from environment variable or default to 'info'
const LOG_LEVEL = process.env.TASKMASTER_LOG_LEVEL
? LOG_LEVELS[process.env.TASKMASTER_LOG_LEVEL.toLowerCase()]
: LOG_LEVELS.info; // Default to info
// Create a color gradient for the banner
const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);

View File

@@ -14,7 +14,8 @@ import {
getResearchModelId,
getFallbackProvider,
getFallbackModelId,
getParametersForRole
getParametersForRole,
getBaseUrlForRole
} from './config-manager.js';
import { log, resolveEnvVariable, findProjectRoot } from './utils.js';
@@ -284,7 +285,13 @@ async function _unifiedServiceRunner(serviceType, params) {
'AI service call failed for all configured roles.';
for (const currentRole of sequence) {
let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn;
let providerName,
modelId,
apiKey,
roleParams,
providerFnSet,
providerApiFn,
baseUrl;
try {
log('info', `New AI service call with role: ${currentRole}`);
@@ -325,6 +332,7 @@ async function _unifiedServiceRunner(serviceType, params) {
// Pass effectiveProjectRoot to getParametersForRole
roleParams = getParametersForRole(currentRole, effectiveProjectRoot);
baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot);
// 2. Get Provider Function Set
providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()];
@@ -401,6 +409,7 @@ async function _unifiedServiceRunner(serviceType, params) {
maxTokens: roleParams.maxTokens,
temperature: roleParams.temperature,
messages,
baseUrl,
...(serviceType === 'generateObject' && { schema, objectName }),
...restApiParams
};

View File

@@ -73,7 +73,11 @@ import {
getApiKeyStatusReport
} from './task-manager/models.js';
import { findProjectRoot } from './utils.js';
import {
isValidTaskStatus,
TASK_STATUS_OPTIONS
} from '../../src/constants/task-status.js';
import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
/**
* Runs the interactive setup process for model configuration.
* @param {string|null} projectRoot - The resolved project root directory.
@@ -486,11 +490,6 @@ function registerCommands(programInstance) {
process.exit(1);
});
// Default help
programInstance.on('--help', function () {
displayHelp();
});
// parse-prd command
programInstance
.command('parse-prd')
@@ -515,7 +514,7 @@ function registerCommands(programInstance) {
const outputPath = options.output;
const force = options.force || false;
const append = options.append || false;
let useForce = false;
let useForce = force;
let useAppend = false;
// Helper function to check if tasks.json exists and confirm overwrite
@@ -609,7 +608,7 @@ function registerCommands(programInstance) {
spinner = ora('Parsing PRD and generating tasks...').start();
await parsePRD(inputFile, outputPath, numTasks, {
append: useAppend,
force: useForce
useForce
});
spinner.succeed('Tasks generated successfully!');
} catch (error) {
@@ -1038,7 +1037,7 @@ function registerCommands(programInstance) {
)
.option(
'-s, --status <status>',
'New status (todo, in-progress, review, done)'
`New status (one of: ${TASK_STATUS_OPTIONS.join(', ')})`
)
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
.action(async (options) => {
@@ -1051,6 +1050,16 @@ function registerCommands(programInstance) {
process.exit(1);
}
if (!isValidTaskStatus(status)) {
console.error(
chalk.red(
`Error: Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
)
);
process.exit(1);
}
console.log(
chalk.blue(`Setting status of task(s) ${taskId} to: ${status}`)
);
@@ -1278,10 +1287,6 @@ function registerCommands(programInstance) {
'--details <details>',
'Implementation details (for manual task creation)'
)
.option(
'--test-strategy <testStrategy>',
'Test strategy (for manual task creation)'
)
.option(
'--dependencies <dependencies>',
'Comma-separated list of task IDs this task depends on'
@@ -1663,6 +1668,7 @@ function registerCommands(programInstance) {
}
} catch (error) {
console.error(chalk.red(`Error: ${error.message}`));
showAddSubtaskHelp();
process.exit(1);
}
})
@@ -2366,14 +2372,7 @@ function setupCLI() {
return 'unknown'; // Default fallback if package.json fails
})
.helpOption('-h, --help', 'Display help')
.addHelpCommand(false) // Disable default help command
.on('--help', () => {
displayHelp(); // Use your custom help display instead
})
.on('-h', () => {
displayHelp();
process.exit(0);
});
.addHelpCommand(false); // Disable default help command
// Modify the help option to use your custom display
programInstance.helpInformation = () => {
@@ -2393,28 +2392,7 @@ function setupCLI() {
*/
async function checkForUpdate() {
// Get current version from package.json ONLY
let currentVersion = 'unknown'; // Initialize with a default
try {
// Try to get the version from the installed package (if applicable) or current dir
let packageJsonPath = path.join(
process.cwd(),
'node_modules',
'task-master-ai',
'package.json'
);
// Fallback to current directory package.json if not found in node_modules
if (!fs.existsSync(packageJsonPath)) {
packageJsonPath = path.join(process.cwd(), 'package.json');
}
if (fs.existsSync(packageJsonPath)) {
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
currentVersion = packageJson.version;
}
} catch (error) {
// Silently fail and use default
log('debug', `Error reading current package version: ${error.message}`);
}
const currentVersion = getTaskMasterVersion();
return new Promise((resolve) => {
// Get the latest version from npm registry

View File

@@ -677,6 +677,13 @@ function getAllProviders() {
return Object.keys(MODEL_MAP || {});
}
function getBaseUrlForRole(role, explicitRoot = null) {
const roleConfig = getModelConfigForRole(role, explicitRoot);
return roleConfig && typeof roleConfig.baseUrl === 'string'
? roleConfig.baseUrl
: undefined;
}
export {
// Core config access
getConfig,
@@ -704,6 +711,7 @@ export {
getFallbackModelId,
getFallbackMaxTokens,
getFallbackTemperature,
getBaseUrlForRole,
// Global setting getters (No env var overrides)
getLogLevel,

View File

@@ -8,6 +8,10 @@ import { validateTaskDependencies } from '../dependency-manager.js';
import { getDebugFlag } from '../config-manager.js';
import updateSingleTaskStatus from './update-single-task-status.js';
import generateTaskFiles from './generate-task-files.js';
import {
isValidTaskStatus,
TASK_STATUS_OPTIONS
} from '../../../src/constants/task-status.js';
/**
* Set the status of a task
@@ -19,6 +23,11 @@ import generateTaskFiles from './generate-task-files.js';
*/
async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {
try {
if (!isValidTaskStatus(newStatus)) {
throw new Error(
`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
);
}
// Determine if we're in MCP mode by checking for mcpLog
const isMcpMode = !!options?.mcpLog;

View File

@@ -1,6 +1,7 @@
import chalk from 'chalk';
import { log } from '../utils.js';
import { isValidTaskStatus } from '../../../src/constants/task-status.js';
/**
* Update the status of a single task
@@ -17,6 +18,12 @@ async function updateSingleTaskStatus(
data,
showUi = true
) {
if (!isValidTaskStatus(newStatus)) {
throw new Error(
`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
);
}
// Check if it's a subtask (e.g., "1.2")
if (taskIdInput.includes('.')) {
const [parentId, subtaskId] = taskIdInput

View File

@@ -16,10 +16,11 @@ import {
truncate,
isSilentMode
} from './utils.js';
import path from 'path';
import fs from 'fs';
import { findNextTask, analyzeTaskComplexity } from './task-manager.js';
import { getProjectName, getDefaultSubtasks } from './config-manager.js';
import { TASK_STATUS_OPTIONS } from '../../src/constants/task-status.js';
import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
// Create a color gradient for the banner
const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);
@@ -46,17 +47,7 @@ function displayBanner() {
);
// Read version directly from package.json
let version = 'unknown'; // Initialize with a default
try {
const packageJsonPath = path.join(process.cwd(), 'package.json');
if (fs.existsSync(packageJsonPath)) {
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
version = packageJson.version;
}
} catch (error) {
// Silently fall back to default version
log('warn', 'Could not read package.json for version info.');
}
const version = getTaskMasterVersion();
console.log(
boxen(
@@ -458,7 +449,7 @@ function displayHelp() {
{
name: 'set-status',
args: '--id=<id> --status=<status>',
desc: 'Update task status (done, pending, etc.)'
desc: `Update task status (${TASK_STATUS_OPTIONS.join(', ')})`
},
{
name: 'update',
@@ -809,12 +800,7 @@ async function displayNextTask(tasksPath) {
'padding-bottom': 0,
compact: true
},
chars: {
mid: '',
'left-mid': '',
'mid-mid': '',
'right-mid': ''
},
chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },
colWidths: [15, Math.min(75, process.stdout.columns - 20 || 60)],
wordWrap: true
});
@@ -902,12 +888,7 @@ async function displayNextTask(tasksPath) {
'padding-bottom': 0,
compact: true
},
chars: {
mid: '',
'left-mid': '',
'mid-mid': '',
'right-mid': ''
},
chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },
wordWrap: true
});

View File

@@ -5,7 +5,7 @@
* using the Vercel AI SDK.
*/
import { createAnthropic } from '@ai-sdk/anthropic';
import { generateText, streamText, generateObject, streamObject } from 'ai';
import { generateText, streamText, generateObject } from 'ai';
import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
// TODO: Implement standardized functions for generateText, streamText, generateObject
@@ -17,7 +17,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils is acces
// Remove the global variable and caching logic
// let anthropicClient;
function getClient(apiKey) {
function getClient(apiKey, baseUrl) {
if (!apiKey) {
// In a real scenario, this would use the config resolver.
// Throwing error here if key isn't passed for simplicity.
@@ -30,14 +30,12 @@ function getClient(apiKey) {
// Create and return a new instance directly with standard version header
return createAnthropic({
apiKey: apiKey,
baseURL: 'https://api.anthropic.com/v1',
...(baseUrl && { baseURL: baseUrl }),
// Use standard version header instead of beta
headers: {
'anthropic-beta': 'output-128k-2025-02-19'
}
});
// }
// return anthropicClient;
}
// --- Standardized Service Function Implementations ---
@@ -51,6 +49,7 @@ function getClient(apiKey) {
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
* @returns {Promise<string>} The generated text content.
* @throws {Error} If the API call fails.
*/
@@ -59,11 +58,12 @@ export async function generateAnthropicText({
modelId,
messages,
maxTokens,
temperature
temperature,
baseUrl
}) {
log('debug', `Generating Anthropic text with model: ${modelId}`);
try {
const client = getClient(apiKey);
const client = getClient(apiKey, baseUrl);
const result = await generateText({
model: client(modelId),
messages: messages,
@@ -93,6 +93,7 @@ export async function generateAnthropicText({
* @param {Array<object>} params.messages - The messages array.
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
* @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
* @throws {Error} If the API call fails to initiate the stream.
*/
@@ -101,20 +102,20 @@ export async function streamAnthropicText({
modelId,
messages,
maxTokens,
temperature
temperature,
baseUrl
}) {
log('debug', `Streaming Anthropic text with model: ${modelId}`);
try {
const client = getClient(apiKey);
const client = getClient(apiKey, baseUrl);
// --- DEBUG LOGGING --- >>
log(
'debug',
'[streamAnthropicText] Parameters received by streamText:',
JSON.stringify(
{
modelId: modelId, // Log modelId being used
messages: messages, // Log the messages array
modelId: modelId,
messages: messages,
maxTokens: maxTokens,
temperature: temperature
},
@@ -122,25 +123,19 @@ export async function streamAnthropicText({
2
)
);
// --- << DEBUG LOGGING ---
const stream = await streamText({
model: client(modelId),
messages: messages,
maxTokens: maxTokens,
temperature: temperature
// Beta header moved to client initialization
// TODO: Add other relevant parameters
});
// *** RETURN THE FULL STREAM OBJECT, NOT JUST stream.textStream ***
return stream;
} catch (error) {
log(
'error',
`Anthropic streamText failed: ${error.message}`,
error.stack // Log stack trace for more details
);
log('error', `Anthropic streamText failed: ${error.message}`, error.stack);
throw error;
}
}
@@ -160,6 +155,7 @@ export async function streamAnthropicText({
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
* @param {number} [params.maxRetries] - Max retries for validation/generation.
* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
* @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If generation or validation fails.
*/
@@ -171,24 +167,22 @@ export async function generateAnthropicObject({
objectName = 'generated_object',
maxTokens,
temperature,
maxRetries = 3
maxRetries = 3,
baseUrl
}) {
log(
'debug',
`Generating Anthropic object ('${objectName}') with model: ${modelId}`
);
try {
const client = getClient(apiKey);
// Log basic debug info
const client = getClient(apiKey, baseUrl);
log(
'debug',
`Using maxTokens: ${maxTokens}, temperature: ${temperature}, model: ${modelId}`
);
const result = await generateObject({
model: client(modelId),
mode: 'tool', // Anthropic generally uses 'tool' mode for structured output
mode: 'tool',
schema: schema,
messages: messages,
tool: {
@@ -199,14 +193,12 @@ export async function generateAnthropicObject({
temperature: temperature,
maxRetries: maxRetries
});
log(
'debug',
`Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
);
return result.object;
} catch (error) {
// Simple error logging
log(
'error',
`Anthropic generateObject ('${objectName}') failed: ${error.message}`

View File
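
The Anthropic hunks above thread a new baseUrl parameter from the public functions down to getClient. A hedged sketch of a call against a custom endpoint; the import path, model id, and proxy URL are illustrative only:

import { generateAnthropicText } from './src/ai-providers/anthropic.js';

const text = await generateAnthropicText({
apiKey: process.env.ANTHROPIC_API_KEY,
modelId: 'claude-3-5-sonnet-20240620',
messages: [{ role: 'user', content: 'Summarize this task.' }],
maxTokens: 1024,
temperature: 0.2,
baseUrl: 'https://anthropic-proxy.example.com/v1' // omit to fall back to the SDK default endpoint
});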

@@ -12,6 +12,16 @@ import { log } from '../../scripts/modules/utils.js'; // Import logging utility
const DEFAULT_MODEL = 'gemini-2.0-pro'; // Or a suitable default
const DEFAULT_TEMPERATURE = 0.2; // Or a suitable default
function getClient(apiKey, baseUrl) {
if (!apiKey) {
throw new Error('Google API key is required.');
}
return createGoogleGenerativeAI({
apiKey: apiKey,
...(baseUrl && { baseURL: baseUrl })
});
}
/**
* Generates text using a Google AI model.
*
@@ -29,7 +39,8 @@ async function generateGoogleText({
modelId = DEFAULT_MODEL,
temperature = DEFAULT_TEMPERATURE,
messages,
maxTokens // Note: Vercel SDK might handle this differently, needs verification
maxTokens,
baseUrl
}) {
if (!apiKey) {
throw new Error('Google API key is required.');
@@ -37,28 +48,21 @@ async function generateGoogleText({
log('info', `Generating text with Google model: ${modelId}`);
try {
// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
const model = googleProvider(modelId); // Correct model retrieval
// Construct payload suitable for Vercel SDK's generateText
// Note: The exact structure might depend on how messages are passed
const googleProvider = getClient(apiKey, baseUrl);
const model = googleProvider(modelId);
const result = await generateText({
model, // Pass the model instance
messages, // Pass the messages array directly
model,
messages,
temperature,
maxOutputTokens: maxTokens // Map to correct Vercel SDK param if available
maxOutputTokens: maxTokens
});
// Assuming result structure provides text directly or within a property
return result.text; // Adjust based on actual SDK response
return result.text;
} catch (error) {
log(
'error',
`Error generating text with Google (${modelId}): ${error.message}`
);
throw error; // Re-throw for unified service handler
throw error;
}
}
@@ -79,7 +83,8 @@ async function streamGoogleText({
modelId = DEFAULT_MODEL,
temperature = DEFAULT_TEMPERATURE,
messages,
maxTokens
maxTokens,
baseUrl
}) {
if (!apiKey) {
throw new Error('Google API key is required.');
@@ -87,19 +92,15 @@ async function streamGoogleText({
log('info', `Streaming text with Google model: ${modelId}`);
try {
// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
const model = googleProvider(modelId); // Correct model retrieval
const googleProvider = getClient(apiKey, baseUrl);
const model = googleProvider(modelId);
const stream = await streamText({
model, // Pass the model instance
model,
messages,
temperature,
maxOutputTokens: maxTokens
});
return stream; // Return the stream directly
return stream;
} catch (error) {
log(
'error',
@@ -130,7 +131,8 @@ async function generateGoogleObject({
messages,
schema,
objectName, // Note: Vercel SDK might use this differently or not at all
maxTokens
maxTokens,
baseUrl
}) {
if (!apiKey) {
throw new Error('Google API key is required.');
@@ -138,23 +140,16 @@ async function generateGoogleObject({
log('info', `Generating object with Google model: ${modelId}`);
try {
// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
const model = googleProvider(modelId); // Correct model retrieval
const googleProvider = getClient(apiKey, baseUrl);
const model = googleProvider(modelId);
const { object } = await generateObject({
model, // Pass the model instance
model,
schema,
messages,
temperature,
maxOutputTokens: maxTokens
// Note: 'objectName' or 'mode' might not be directly applicable here
// depending on how `@ai-sdk/google` handles `generateObject`.
// Check SDK docs if specific tool calling/JSON mode needs explicit setup.
});
return object; // Return the parsed object
return object;
} catch (error) {
log(
'error',

View File
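
Every provider touched in this change set now builds its client through the same getClient(apiKey, baseUrl) shape, using a conditional spread so baseURL is only set when a value is supplied and the SDK default endpoint applies otherwise. A standalone sketch of the idiom (the helper name is ours, purely illustrative):

function withOptionalBaseUrl(options, baseUrl) {
// Spreading a falsy value into an object literal adds no properties,
// so the baseURL key only appears when baseUrl is truthy.
return { ...options, ...(baseUrl && { baseURL: baseUrl }) };
}

withOptionalBaseUrl({ apiKey: 'sk-test' });
// -> { apiKey: 'sk-test' }
withOptionalBaseUrl({ apiKey: 'sk-test' }, 'https://proxy.local/v1');
// -> { apiKey: 'sk-test', baseURL: 'https://proxy.local/v1' }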

@@ -1,16 +1,26 @@
import { createOpenAI, openai } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
import { generateText, streamText, generateObject } from 'ai'; // Import necessary functions from 'ai'
import { createOpenAI } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
import { generateObject } from 'ai'; // Import necessary functions from 'ai'
import { log } from '../../scripts/modules/utils.js';
function getClient(apiKey, baseUrl) {
if (!apiKey) {
throw new Error('OpenAI API key is required.');
}
return createOpenAI({
apiKey: apiKey,
...(baseUrl && { baseURL: baseUrl })
});
}
/**
* Generates text using OpenAI models via Vercel AI SDK.
*
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
* @returns {Promise<string>} The generated text content.
* @throws {Error} If API call fails.
*/
export async function generateOpenAIText(params) {
const { apiKey, modelId, messages, maxTokens, temperature } = params;
const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
log('debug', `generateOpenAIText called with model: ${modelId}`);
if (!apiKey) {
@@ -23,18 +33,15 @@ export async function generateOpenAIText(params) {
throw new Error('Invalid or empty messages array provided for OpenAI.');
}
const openaiClient = createOpenAI({ apiKey });
const openaiClient = getClient(apiKey, baseUrl);
try {
const result = await openaiClient.chat(messages, {
// Updated: Use openaiClient.chat directly
model: modelId,
max_tokens: maxTokens,
temperature
});
// Adjust based on actual Vercel SDK response structure for openaiClient.chat
// This might need refinement based on testing the SDK's output.
const textContent = result?.choices?.[0]?.message?.content?.trim();
if (!textContent) {
@@ -65,12 +72,12 @@ export async function generateOpenAIText(params) {
/**
* Streams text using OpenAI models via Vercel AI SDK.
*
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
* @returns {Promise<ReadableStream>} A readable stream of text deltas.
* @throws {Error} If API call fails.
*/
export async function streamOpenAIText(params) {
const { apiKey, modelId, messages, maxTokens, temperature } = params;
const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
log('debug', `streamOpenAIText called with model: ${modelId}`);
if (!apiKey) {
@@ -85,12 +92,10 @@ export async function streamOpenAIText(params) {
);
}
const openaiClient = createOpenAI({ apiKey });
const openaiClient = getClient(apiKey, baseUrl);
try {
// Use the streamText function from Vercel AI SDK core
const stream = await openaiClient.chat.stream(messages, {
// Updated: Use openaiClient.chat.stream
model: modelId,
max_tokens: maxTokens,
temperature
@@ -100,7 +105,6 @@ export async function streamOpenAIText(params) {
'debug',
`OpenAI streamText initiated successfully for model: ${modelId}`
);
// The Vercel SDK's streamText should directly return the stream object
return stream;
} catch (error) {
log(
@@ -117,7 +121,7 @@ export async function streamOpenAIText(params) {
/**
* Generates structured objects using OpenAI models via Vercel AI SDK.
*
* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature, baseUrl.
* @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If API call fails or object generation fails.
*/
@@ -129,7 +133,8 @@ export async function generateOpenAIObject(params) {
schema,
objectName,
maxTokens,
temperature
temperature,
baseUrl
} = params;
log(
'debug',
@@ -145,10 +150,9 @@ export async function generateOpenAIObject(params) {
if (!objectName)
throw new Error('Object name is required for OpenAI object generation.');
const openaiClient = createOpenAI({ apiKey });
const openaiClient = getClient(apiKey, baseUrl);
try {
// Use the imported generateObject function from 'ai' package
const result = await generateObject({
model: openaiClient(modelId),
schema: schema,

View File

@@ -2,6 +2,14 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
import { generateText, streamText, generateObject } from 'ai';
import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in scripts/modules
function getClient(apiKey, baseUrl) {
if (!apiKey) throw new Error('OpenRouter API key is required.');
return createOpenRouter({
apiKey,
...(baseUrl && { baseURL: baseUrl })
});
}
/**
* Generates text using an OpenRouter chat model.
*
@@ -11,6 +19,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in
* @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
* @param {number} [params.maxTokens] - Maximum tokens to generate.
* @param {number} [params.temperature] - Sampling temperature.
* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
* @returns {Promise<string>} The generated text content.
* @throws {Error} If the API call fails.
*/
@@ -20,6 +29,7 @@ async function generateOpenRouterText({
messages,
maxTokens,
temperature,
baseUrl,
...rest // Capture any other Vercel AI SDK compatible parameters
}) {
if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -28,7 +38,7 @@ async function generateOpenRouterText({
throw new Error('Messages array cannot be empty.');
try {
const openrouter = createOpenRouter({ apiKey });
const openrouter = getClient(apiKey, baseUrl);
const model = openrouter.chat(modelId); // Assuming chat model
const { text } = await generateText({
@@ -58,6 +68,7 @@ async function generateOpenRouterText({
* @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
* @param {number} [params.maxTokens] - Maximum tokens to generate.
* @param {number} [params.temperature] - Sampling temperature.
* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
* @returns {Promise<ReadableStream<string>>} A readable stream of text deltas.
* @throws {Error} If the API call fails.
*/
@@ -67,6 +78,7 @@ async function streamOpenRouterText({
messages,
maxTokens,
temperature,
baseUrl,
...rest
}) {
if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -75,7 +87,7 @@ async function streamOpenRouterText({
throw new Error('Messages array cannot be empty.');
try {
const openrouter = createOpenRouter({ apiKey });
const openrouter = getClient(apiKey, baseUrl);
const model = openrouter.chat(modelId);
// Directly return the stream from the Vercel AI SDK function
@@ -108,6 +120,7 @@ async function streamOpenRouterText({
* @param {number} [params.maxRetries=3] - Max retries for object generation.
* @param {number} [params.maxTokens] - Maximum tokens.
* @param {number} [params.temperature] - Temperature.
* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
* @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If the API call fails or validation fails.
*/
@@ -120,6 +133,7 @@ async function generateOpenRouterObject({
maxRetries = 3,
maxTokens,
temperature,
baseUrl,
...rest
}) {
if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -129,7 +143,7 @@ async function generateOpenRouterObject({
throw new Error('Messages array cannot be empty.');
try {
const openrouter = createOpenRouter({ apiKey });
const openrouter = getClient(apiKey, baseUrl);
const model = openrouter.chat(modelId);
const { object } = await generateObject({

View File

@@ -10,13 +10,13 @@ import { log } from '../../scripts/modules/utils.js';
// --- Client Instantiation ---
// Similar to Anthropic, this expects the resolved API key to be passed in.
function getClient(apiKey) {
function getClient(apiKey, baseUrl) {
if (!apiKey) {
throw new Error('Perplexity API key is required.');
}
// Create and return a new instance directly
return createPerplexity({
apiKey: apiKey
apiKey: apiKey,
...(baseUrl && { baseURL: baseUrl })
});
}
@@ -31,6 +31,7 @@ function getClient(apiKey) {
* @param {Array<object>} params.messages - The messages array.
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
* @returns {Promise<string>} The generated text content.
* @throws {Error} If the API call fails.
*/
@@ -39,11 +40,12 @@ export async function generatePerplexityText({
modelId,
messages,
maxTokens,
temperature
temperature,
baseUrl
}) {
log('debug', `Generating Perplexity text with model: ${modelId}`);
try {
const client = getClient(apiKey);
const client = getClient(apiKey, baseUrl);
const result = await generateText({
model: client(modelId),
messages: messages,
@@ -70,6 +72,7 @@ export async function generatePerplexityText({
* @param {Array<object>} params.messages - The messages array.
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
* @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
* @throws {Error} If the API call fails to initiate the stream.
*/
@@ -78,11 +81,12 @@ export async function streamPerplexityText({
modelId,
messages,
maxTokens,
temperature
temperature,
baseUrl
}) {
log('debug', `Streaming Perplexity text with model: ${modelId}`);
try {
const client = getClient(apiKey);
const client = getClient(apiKey, baseUrl);
const stream = await streamText({
model: client(modelId),
messages: messages,
@@ -112,6 +116,7 @@ export async function streamPerplexityText({
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
* @param {number} [params.maxRetries] - Max retries for validation/generation.
* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
* @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If generation or validation fails or is unsupported.
*/
@@ -123,7 +128,8 @@ export async function generatePerplexityObject({
objectName = 'generated_object',
maxTokens,
temperature,
maxRetries = 1 // Lower retries as support might be limited
maxRetries = 1,
baseUrl
}) {
log(
'debug',
@@ -134,8 +140,7 @@ export async function generatePerplexityObject({
'generateObject support for Perplexity might be limited or experimental.'
);
try {
const client = getClient(apiKey);
// Attempt using generateObject, but be prepared for potential issues
const client = getClient(apiKey, baseUrl);
const result = await generateObject({
model: client(modelId),
schema: schema,

View File

@@ -9,14 +9,13 @@ import { generateText, streamText, generateObject } from 'ai'; // Only import wh
import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
// --- Client Instantiation ---
function getClient(apiKey) {
function getClient(apiKey, baseUrl) {
if (!apiKey) {
throw new Error('xAI API key is required.');
}
// Create and return a new instance directly
return createXai({
apiKey: apiKey
// Add baseURL or other options if needed later
apiKey: apiKey,
...(baseUrl && { baseURL: baseUrl })
});
}
@@ -31,6 +30,7 @@ function getClient(apiKey) {
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
* @param {string} [params.baseUrl] - The base URL for the xAI API.
* @returns {Promise<string>} The generated text content.
* @throws {Error} If the API call fails.
*/
@@ -39,13 +39,14 @@ export async function generateXaiText({
modelId,
messages,
maxTokens,
temperature
temperature,
baseUrl
}) {
log('debug', `Generating xAI text with model: ${modelId}`);
try {
const client = getClient(apiKey);
const client = getClient(apiKey, baseUrl);
const result = await generateText({
model: client(modelId), // Correct model invocation
model: client(modelId),
messages: messages,
maxTokens: maxTokens,
temperature: temperature
@@ -70,6 +71,7 @@ export async function generateXaiText({
* @param {Array<object>} params.messages - The messages array.
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
* @param {string} [params.baseUrl] - The base URL for the xAI API.
* @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
* @throws {Error} If the API call fails to initiate the stream.
*/
@@ -78,18 +80,19 @@ export async function streamXaiText({
modelId,
messages,
maxTokens,
temperature
temperature,
baseUrl
}) {
log('debug', `Streaming xAI text with model: ${modelId}`);
try {
const client = getClient(apiKey);
const client = getClient(apiKey, baseUrl);
const stream = await streamText({
model: client(modelId), // Correct model invocation
model: client(modelId),
messages: messages,
maxTokens: maxTokens,
temperature: temperature
});
return stream; // Return the full stream object
return stream;
} catch (error) {
log('error', `xAI streamText failed: ${error.message}`, error.stack);
throw error;
@@ -110,6 +113,7 @@ export async function streamXaiText({
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
* @param {number} [params.maxRetries] - Max retries for validation/generation.
* @param {string} [params.baseUrl] - The base URL for the xAI API.
* @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If generation or validation fails.
*/
@@ -121,16 +125,17 @@ export async function generateXaiObject({
objectName = 'generated_xai_object',
maxTokens,
temperature,
maxRetries = 3
maxRetries = 3,
baseUrl
}) {
log(
'warn', // Log warning as this is likely unsupported
'warn',
`Attempting to generate xAI object ('${objectName}') with model: ${modelId}. This may not be supported by the provider.`
);
try {
const client = getClient(apiKey);
const client = getClient(apiKey, baseUrl);
const result = await generateObject({
model: client(modelId), // Correct model invocation
model: client(modelId),
// Note: mode might need adjustment if xAI ever supports object generation differently
mode: 'tool',
schema: schema,
@@ -153,6 +158,6 @@ export async function generateXaiObject({
'error',
`xAI generateObject ('${objectName}') failed: ${error.message}. (Likely unsupported by provider)`
);
throw error; // Re-throw the error
throw error;
}
}

View File

@@ -0,0 +1,32 @@
/**
* @typedef {'pending' | 'done' | 'in-progress' | 'review' | 'deferred' | 'cancelled'} TaskStatus
*/
/**
* Task status options list
* @type {TaskStatus[]}
* @description Defines possible task statuses:
* - pending: Task waiting to start
* - done: Task completed
* - in-progress: Task in progress
* - review: Task completed and waiting for review
* - deferred: Task postponed or paused
* - cancelled: Task cancelled and will not be completed
*/
export const TASK_STATUS_OPTIONS = [
'pending',
'done',
'in-progress',
'review',
'deferred',
'cancelled'
];
/**
* Check if a given status is a valid task status
* @param {string} status - The status to check
* @returns {boolean} True if the status is valid, false otherwise
*/
export function isValidTaskStatus(status) {
return TASK_STATUS_OPTIONS.includes(status);
}
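
A short usage sketch of the new constants module; the relative import path assumes a caller under scripts/modules, as in the hunks above:

import {
isValidTaskStatus,
TASK_STATUS_OPTIONS
} from '../../src/constants/task-status.js';

isValidTaskStatus('in-progress'); // true
isValidTaskStatus('Done'); // false: matching is case-sensitive
console.log(`Valid statuses: ${TASK_STATUS_OPTIONS.join(', ')}`);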

src/utils/getVersion.js (new file, 35 lines added)
View File

@@ -0,0 +1,35 @@
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import { log } from '../../scripts/modules/utils.js';
/**
* Reads the version from the nearest package.json relative to this file.
* Returns 'unknown' if not found or on error.
* @returns {string} The version string or 'unknown'.
*/
export function getTaskMasterVersion() {
let version = 'unknown';
try {
// Get the directory of the current module (getVersion.js)
const currentModuleFilename = fileURLToPath(import.meta.url);
const currentModuleDirname = path.dirname(currentModuleFilename);
// Construct the path to package.json relative to this file (../../package.json)
const packageJsonPath = path.join(
currentModuleDirname,
'..',
'..',
'package.json'
);
if (fs.existsSync(packageJsonPath)) {
const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8');
const packageJson = JSON.parse(packageJsonContent);
version = packageJson.version;
}
} catch (error) {
// Silently fall back to default version
log('warn', 'Could not read own package.json for version info.', error);
}
return version;
}
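
The point of this helper, as used in the ui.js hunk above, is that it resolves package.json relative to its own module file rather than process.cwd(), so a globally installed CLI reports its own version instead of whatever project it happens to run in. A minimal usage sketch (import path assumed relative to the repo root):

import { getTaskMasterVersion } from './src/utils/getVersion.js';

console.log(`task-master version: ${getTaskMasterVersion()}`);
// e.g. "task-master version: 1.2.3", or "unknown" if package.json cannot be read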

View File

@@ -9,7 +9,7 @@ process.env.MODEL = 'sonar-pro';
process.env.MAX_TOKENS = '64000';
process.env.TEMPERATURE = '0.2';
process.env.DEBUG = 'false';
process.env.LOG_LEVEL = 'error'; // Set to error to reduce noise in tests
process.env.TASKMASTER_LOG_LEVEL = 'error'; // Set to error to reduce noise in tests
process.env.DEFAULT_SUBTASKS = '5';
process.env.DEFAULT_PRIORITY = 'medium';
process.env.PROJECT_NAME = 'Test Project';

View File

@@ -8,6 +8,7 @@ const mockGetResearchModelId = jest.fn();
const mockGetFallbackProvider = jest.fn();
const mockGetFallbackModelId = jest.fn();
const mockGetParametersForRole = jest.fn();
const mockGetBaseUrlForRole = jest.fn();
jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
getMainProvider: mockGetMainProvider,
@@ -16,7 +17,8 @@ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
getResearchModelId: mockGetResearchModelId,
getFallbackProvider: mockGetFallbackProvider,
getFallbackModelId: mockGetFallbackModelId,
getParametersForRole: mockGetParametersForRole
getParametersForRole: mockGetParametersForRole,
getBaseUrlForRole: mockGetBaseUrlForRole
}));
// Mock AI Provider Modules

View File

@@ -199,6 +199,12 @@ const testSetTaskStatus = (tasksData, taskIdInput, newStatus) => {
// Simplified version of updateSingleTaskStatus for testing
const testUpdateSingleTaskStatus = (tasksData, taskIdInput, newStatus) => {
if (!isValidTaskStatus(newStatus)) {
throw new Error(
`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
);
}
// Check if it's a subtask (e.g., "1.2")
if (taskIdInput.includes('.')) {
const [parentId, subtaskId] = taskIdInput
@@ -329,6 +335,10 @@ const testAddTask = (
import * as taskManager from '../../scripts/modules/task-manager.js';
import { sampleClaudeResponse } from '../fixtures/sample-claude-response.js';
import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';
import {
isValidTaskStatus,
TASK_STATUS_OPTIONS
} from '../../src/constants/task-status.js';
// Destructure the required functions for convenience
const { findNextTask, generateTaskFiles, clearSubtasks, updateTaskById } =
@@ -1165,6 +1175,16 @@ describe('Task Manager Module', () => {
expect(testTasksData.tasks[1].status).toBe('done');
});
test('should throw error for invalid status', async () => {
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
// Assert
expect(() =>
testUpdateSingleTaskStatus(testTasksData, '2', 'Done')
).toThrow(/Error: Invalid status value: Done./);
});
test('should update subtask status', async () => {
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));