From 90068348d3ff9ad36390ef3420e8e0df5da08361 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 3 May 2025 18:13:24 +0000 Subject: [PATCH 01/12] Version Packages --- .changeset/pre.json | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/.changeset/pre.json b/.changeset/pre.json index 72461b35..0d16f4e6 100644 --- a/.changeset/pre.json +++ b/.changeset/pre.json @@ -6,25 +6,7 @@ }, "changesets": [ "beige-doodles-type", - "beige-rats-accept", - "blue-spies-kick", - "cuddly-zebras-matter", - "curvy-candies-eat", - "easy-toys-wash", - "every-stars-sell", - "fine-monkeys-eat", - "fine-signs-add", - "gentle-views-jump", - "mighty-mirrors-watch", - "neat-donkeys-shave", - "nine-rocks-sink", - "ninety-ghosts-relax", - "ninety-wombats-pull", - "public-cooks-fetch", "red-oranges-attend", - "red-suns-wash", - "tricky-papayas-hang", - "violet-papayas-see", - "violet-parrots-march" + "red-suns-wash" ] } From 4117f71c18ee4d321a9c91308d00d5d69bfac61e Mon Sep 17 00:00:00 2001 From: Joe Danziger Date: Tue, 13 May 2025 16:06:09 -0400 Subject: [PATCH 02/12] Fix CLI --force flag on parse-prd command --- .changeset/floppy-plants-marry.md | 9 +++++++++ scripts/modules/commands.js | 4 ++-- 2 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 .changeset/floppy-plants-marry.md diff --git a/.changeset/floppy-plants-marry.md b/.changeset/floppy-plants-marry.md new file mode 100644 index 00000000..401fed18 --- /dev/null +++ b/.changeset/floppy-plants-marry.md @@ -0,0 +1,9 @@ +--- +'task-master-ai': patch +--- + +Fix CLI --force flag for parse-prd command + +Previously, the --force flag was not respected when running `parse-prd`, causing the command to prompt for confirmation or fail even when --force was provided. This patch ensures that the flag is correctly passed and handled, allowing users to overwrite existing tasks.json files as intended. 
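A minimal sketch of the flag-forwarding pattern this patch restores, assuming a Commander-style CLI; the exact `parsePRD` call shape and option names are illustrative, not the project's actual signatures:

```js
import { Command } from 'commander';

const program = new Command();

program
	.command('parse-prd [file]')
	.option('-f, --force', 'Skip confirmation when tasks.json already exists')
	.option('--append', 'Append generated tasks to the existing file')
	.action(async (file, options) => {
		// Seed the runtime flags from the parsed CLI options rather than
		// hard-coded `false`, so --force actually reaches the implementation.
		const useForce = options.force || false;
		const useAppend = options.append || false;
		await parsePRD(file, { append: useAppend, force: useForce }); // hypothetical call
	});

program.parse(process.argv);
```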
+ +- Fixes #477 \ No newline at end of file diff --git a/scripts/modules/commands.js b/scripts/modules/commands.js index 21870f74..0e60642b 100644 --- a/scripts/modules/commands.js +++ b/scripts/modules/commands.js @@ -515,7 +515,7 @@ function registerCommands(programInstance) { const outputPath = options.output; const force = options.force || false; const append = options.append || false; - let useForce = false; + let useForce = force; let useAppend = false; // Helper function to check if tasks.json exists and confirm overwrite @@ -609,7 +609,7 @@ function registerCommands(programInstance) { spinner = ora('Parsing PRD and generating tasks...').start(); await parsePRD(inputFile, outputPath, numTasks, { append: useAppend, - force: useForce + useForce }); spinner.succeed('Tasks generated successfully!'); } catch (error) { From efce37469bc58eceef46763ba32df1ed45242211 Mon Sep 17 00:00:00 2001 From: Joe Danziger Date: Wed, 14 May 2025 07:12:15 -0400 Subject: [PATCH 03/12] Fix duplicate output on CLI help screen (#496) * remove duplication * add changeset * fix formatting --- .changeset/wide-eyes-relax.md | 11 +++++++++++ scripts/modules/commands.js | 14 +------------- 2 files changed, 12 insertions(+), 13 deletions(-) create mode 100644 .changeset/wide-eyes-relax.md diff --git a/.changeset/wide-eyes-relax.md b/.changeset/wide-eyes-relax.md new file mode 100644 index 00000000..1684d1a5 --- /dev/null +++ b/.changeset/wide-eyes-relax.md @@ -0,0 +1,11 @@ +--- +'task-master-ai': patch +--- + +Fix duplicate output on CLI help screen + +- Prevent the Task Master CLI from printing the help screen more than once when using `-h` or `--help`. +- Removed redundant manual event handlers and guards for help output; now only the Commander `.helpInformation` override is used for custom help. +- Simplified logic so that help is only shown once for both "no arguments" and help flag flows. +- Ensures a clean, branded help experience with no repeated content. 
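A sketch of the single-override approach this changeset describes, assuming Commander; the patch keeps only the `helpInformation` override and drops the event handlers, and the empty return value here is an assumption about how the custom display suppresses the default text:

```js
import { Command } from 'commander';

const program = new Command();

// Route ALL help rendering through one override instead of also binding
// '--help' / '-h' event handlers, which printed the screen twice.
program.helpInformation = () => {
	displayHelp(); // custom branded help screen, defined elsewhere
	return ''; // suppress Commander's built-in help text
};
```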
+- Fixes #339 diff --git a/scripts/modules/commands.js b/scripts/modules/commands.js index 0e60642b..5d62b5e7 100644 --- a/scripts/modules/commands.js +++ b/scripts/modules/commands.js @@ -486,11 +486,6 @@ function registerCommands(programInstance) { process.exit(1); }); - // Default help - programInstance.on('--help', function () { - displayHelp(); - }); - // parse-prd command programInstance .command('parse-prd') @@ -2366,14 +2361,7 @@ function setupCLI() { return 'unknown'; // Default fallback if package.json fails }) .helpOption('-h, --help', 'Display help') - .addHelpCommand(false) // Disable default help command - .on('--help', () => { - displayHelp(); // Use your custom help display instead - }) - .on('-h', () => { - displayHelp(); - process.exit(0); - }); + .addHelpCommand(false); // Disable default help command // Modify the help option to use your custom display programInstance.helpInformation = () => { From c7042845d6f6b520be85e0e894c2477b15741d0a Mon Sep 17 00:00:00 2001 From: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com> Date: Thu, 15 May 2025 00:28:06 +0200 Subject: [PATCH 04/12] chore: improve CI to better accomodate pre-releases for testing (#507) --- .github/workflows/pre-release.yml | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 0bab3820..430c1aab 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -2,9 +2,6 @@ name: Pre-Release (RC) on: workflow_dispatch: # Allows manual triggering from GitHub UI/API - push: - branches: - - 'next' concurrency: pre-release-${{ github.ref }} @@ -41,12 +38,10 @@ jobs: npx changeset pre enter rc - name: Version RC packages - run: | - git config user.name "GitHub Actions" - git config user.email "github-actions@example.com" - npx changeset version - git add . 
- git commit -m "chore: rc version bump" || echo "No changes to commit" + run: npx changeset version + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + NPM_TOKEN: ${{ secrets.NPM_TOKEN }} - name: Create Release Candidate Pull Request or Publish Release Candidate to npm uses: changesets/action@v1 @@ -55,3 +50,13 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} NPM_TOKEN: ${{ secrets.NPM_TOKEN }} + + - name: Exit RC mode + run: npx changeset pre exit + + - name: Commit & Push changes + uses: actions-js/push@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + branch: ${{ github.ref }} + message: 'chore: rc version bump' From dd90c9cb5dec8a0694d82967d6a4b77139693a97 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 3 May 2025 18:13:24 +0000 Subject: [PATCH 05/12] Version Packages --- .changeset/pre.json | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/.changeset/pre.json b/.changeset/pre.json index 72461b35..0d16f4e6 100644 --- a/.changeset/pre.json +++ b/.changeset/pre.json @@ -6,25 +6,7 @@ }, "changesets": [ "beige-doodles-type", - "beige-rats-accept", - "blue-spies-kick", - "cuddly-zebras-matter", - "curvy-candies-eat", - "easy-toys-wash", - "every-stars-sell", - "fine-monkeys-eat", - "fine-signs-add", - "gentle-views-jump", - "mighty-mirrors-watch", - "neat-donkeys-shave", - "nine-rocks-sink", - "ninety-ghosts-relax", - "ninety-wombats-pull", - "public-cooks-fetch", "red-oranges-attend", - "red-suns-wash", - "tricky-papayas-hang", - "violet-papayas-see", - "violet-parrots-march" + "red-suns-wash" ] } From a1f8d52474fdbdf48e17a63e3f567a6d63010d9f Mon Sep 17 00:00:00 2001 From: Kayvan Sylvan Date: Wed, 14 May 2025 16:09:41 -0700 Subject: [PATCH 06/12] chore: rename log level environment variable to `TASKMASTER_LOG_LEVEL` (#417) * chore: rename log level environment variable to `TASKMASTER_LOG_LEVEL` ### CHANGES - Update environment variable from `LOG_LEVEL` to `TASKMASTER_LOG_LEVEL`. - Reflect change in documentation for clarity. - Adjust variable name in script and test files. - Maintain default log level as `info`. * fix: add changeset * chore: rename `LOG_LEVEL` to `TASKMASTER_LOG_LEVEL` for consistency ### CHANGES - Update environment variable name to `TASKMASTER_LOG_LEVEL` in documentation. - Reflect rename in configuration rules for clarity. - Maintain consistency across project configuration settings. --- .changeset/slow-singers-swim.md | 5 +++++ .cursor/rules/dev_workflow.mdc | 2 +- assets/.windsurfrules | 2 +- assets/scripts_README.md | 6 +++--- scripts/README.md | 6 +++--- scripts/init.js | 8 ++++---- tests/setup.js | 2 +- 7 files changed, 18 insertions(+), 13 deletions(-) create mode 100644 .changeset/slow-singers-swim.md diff --git a/.changeset/slow-singers-swim.md b/.changeset/slow-singers-swim.md new file mode 100644 index 00000000..dadfda63 --- /dev/null +++ b/.changeset/slow-singers-swim.md @@ -0,0 +1,5 @@ +--- +'task-master-ai': patch +--- + +Fix for issue #409 LOG_LEVEL Pydantic validation error diff --git a/.cursor/rules/dev_workflow.mdc b/.cursor/rules/dev_workflow.mdc index 4d430323..003251d8 100644 --- a/.cursor/rules/dev_workflow.mdc +++ b/.cursor/rules/dev_workflow.mdc @@ -116,7 +116,7 @@ Taskmaster configuration is managed through two main mechanisms: * For MCP/Cursor integration, configure these keys in the `env` section of `.cursor/mcp.json`. 
* Available keys/variables: See `assets/env.example` or the Configuration section in the command reference (previously linked to `taskmaster.mdc`). -**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool. +**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `TASKMASTER_LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool. **If AI commands FAIL in MCP** verify that the API key for the selected provider is present in the `env` section of `.cursor/mcp.json`. **If AI commands FAIL in CLI** verify that the API key for the selected provider is present in the `.env` file in the root of the project. diff --git a/assets/.windsurfrules b/assets/.windsurfrules index c253460c..a5cf07aa 100644 --- a/assets/.windsurfrules +++ b/assets/.windsurfrules @@ -198,7 +198,7 @@ alwaysApply: true - **MAX_TOKENS** (Default: `"4000"`): Maximum tokens for responses (Example: `MAX_TOKENS=8000`) - **TEMPERATURE** (Default: `"0.7"`): Temperature for model responses (Example: `TEMPERATURE=0.5`) - **DEBUG** (Default: `"false"`): Enable debug logging (Example: `DEBUG=true`) - - **LOG_LEVEL** (Default: `"info"`): Console output level (Example: `LOG_LEVEL=debug`) + - **TASKMASTER_LOG_LEVEL** (Default: `"info"`): Console output level (Example: `TASKMASTER_LOG_LEVEL=debug`) - **DEFAULT_SUBTASKS** (Default: `"3"`): Default subtask count (Example: `DEFAULT_SUBTASKS=5`) - **DEFAULT_PRIORITY** (Default: `"medium"`): Default priority (Example: `DEFAULT_PRIORITY=high`) - **PROJECT_NAME** (Default: `"MCP SaaS MVP"`): Project name in metadata (Example: `PROJECT_NAME=My Awesome Project`) diff --git a/assets/scripts_README.md b/assets/scripts_README.md index 0d615389..1e76856f 100644 --- a/assets/scripts_README.md +++ b/assets/scripts_README.md @@ -31,7 +31,7 @@ Task Master configuration is now managed through two primary methods: - Create a `.env` file in your project root for CLI usage. - See `assets/env.example` for required key names. -**Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead. +**Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `TASKMASTER_LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead. ## How It Works @@ -42,7 +42,7 @@ Task Master configuration is now managed through two primary methods: - Tasks can have `subtasks` for more detailed implementation steps. - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress. -2. **CLI Commands** +2. 
**CLI Commands** You can run the commands via: ```bash @@ -200,7 +200,7 @@ Notes: ## Logging -The script supports different logging levels controlled by the `LOG_LEVEL` environment variable: +The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable: - `debug`: Detailed information, typically useful for troubleshooting - `info`: Confirmation that things are working as expected (default) diff --git a/scripts/README.md b/scripts/README.md index 640703e4..1b83c635 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -32,7 +32,7 @@ The script can be configured through environment variables in a `.env` file at t - `PERPLEXITY_API_KEY`: Your Perplexity API key for research-backed subtask generation - `PERPLEXITY_MODEL`: Specify which Perplexity model to use (default: "sonar-medium-online") - `DEBUG`: Enable debug logging (default: false) -- `LOG_LEVEL`: Log level - debug, info, warn, error (default: info) +- `TASKMASTER_LOG_LEVEL`: Log level - debug, info, warn, error (default: info) - `DEFAULT_SUBTASKS`: Default number of subtasks when expanding (default: 3) - `DEFAULT_PRIORITY`: Default priority for generated tasks (default: medium) - `PROJECT_NAME`: Override default project name in tasks.json @@ -47,7 +47,7 @@ The script can be configured through environment variables in a `.env` file at t - Tasks can have `subtasks` for more detailed implementation steps. - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress. -2. **Script Commands** +2. **Script Commands** You can run the script via: ```bash @@ -225,7 +225,7 @@ To use the Perplexity integration: ## Logging -The script supports different logging levels controlled by the `LOG_LEVEL` environment variable: +The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable: - `debug`: Detailed information, typically useful for troubleshooting - `info`: Confirmation that things are working as expected (default) diff --git a/scripts/init.js b/scripts/init.js index efe776d7..9f636314 100755 --- a/scripts/init.js +++ b/scripts/init.js @@ -38,10 +38,10 @@ const LOG_LEVELS = { success: 4 }; -// Get log level from environment or default to info -const LOG_LEVEL = process.env.LOG_LEVEL - ? LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()] - : LOG_LEVELS.info; +// Determine log level from environment variable or default to 'info' +const LOG_LEVEL = process.env.TASKMASTER_LOG_LEVEL + ? 
LOG_LEVELS[process.env.TASKMASTER_LOG_LEVEL.toLowerCase()] + : LOG_LEVELS.info; // Default to info // Create a color gradient for the banner const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']); diff --git a/tests/setup.js b/tests/setup.js index 8dedeacd..81e11109 100644 --- a/tests/setup.js +++ b/tests/setup.js @@ -9,7 +9,7 @@ process.env.MODEL = 'sonar-pro'; process.env.MAX_TOKENS = '64000'; process.env.TEMPERATURE = '0.2'; process.env.DEBUG = 'false'; -process.env.LOG_LEVEL = 'error'; // Set to error to reduce noise in tests +process.env.TASKMASTER_LOG_LEVEL = 'error'; // Set to error to reduce noise in tests process.env.DEFAULT_SUBTASKS = '5'; process.env.DEFAULT_PRIORITY = 'medium'; process.env.PROJECT_NAME = 'Test Project'; From 0a611843b56a856ef0a479dc34078326e05ac3a8 Mon Sep 17 00:00:00 2001 From: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com> Date: Thu, 15 May 2025 01:32:49 +0200 Subject: [PATCH 07/12] fix: Inline comments in .env.example conflicting with env variable values (#501) * fix: Update API key format in env.example to use quotes for consistency * chore: add changelog --- .changeset/soft-zoos-flow.md | 7 +++++++ assets/env.example | 14 +++++++------- 2 files changed, 14 insertions(+), 7 deletions(-) create mode 100644 .changeset/soft-zoos-flow.md diff --git a/.changeset/soft-zoos-flow.md b/.changeset/soft-zoos-flow.md new file mode 100644 index 00000000..d908fa64 --- /dev/null +++ b/.changeset/soft-zoos-flow.md @@ -0,0 +1,7 @@ +--- +'task-master-ai': patch +--- + +Fix initial .env.example to work out of the box + +- Closes #419 diff --git a/assets/env.example b/assets/env.example index d44c6b09..1c9b41e4 100644 --- a/assets/env.example +++ b/assets/env.example @@ -1,8 +1,8 @@ # API Keys (Required to enable respective provider) -ANTHROPIC_API_KEY=your_anthropic_api_key_here # Required: Format: sk-ant-api03-... -PERPLEXITY_API_KEY=your_perplexity_api_key_here # Optional: Format: pplx-... -OPENAI_API_KEY=your_openai_api_key_here # Optional, for OpenAI/OpenRouter models. Format: sk-proj-... -GOOGLE_API_KEY=your_google_api_key_here # Optional, for Google Gemini models. -MISTRAL_API_KEY=your_mistral_key_here # Optional, for Mistral AI models. -XAI_API_KEY=YOUR_XAI_KEY_HERE # Optional, for xAI AI models. -AZURE_OPENAI_API_KEY=your_azure_key_here # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig). \ No newline at end of file +ANTHROPIC_API_KEY="your_anthropic_api_key_here" # Required: Format: sk-ant-api03-... +PERPLEXITY_API_KEY="your_perplexity_api_key_here" # Optional: Format: pplx-... +OPENAI_API_KEY="your_openai_api_key_here" # Optional, for OpenAI/OpenRouter models. Format: sk-proj-... +GOOGLE_API_KEY="your_google_api_key_here" # Optional, for Google Gemini models. +MISTRAL_API_KEY="your_mistral_key_here" # Optional, for Mistral AI models. +XAI_API_KEY="YOUR_XAI_KEY_HERE" # Optional, for xAI AI models. +AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig). \ No newline at end of file From a96215a359b25061fd3b3f3c7b10e8ac0390c062 Mon Sep 17 00:00:00 2001 From: Lars Bell Date: Thu, 15 May 2025 12:01:21 +0100 Subject: [PATCH 08/12] Update .taskmasterconfig (#435) * Update .taskmasterconfig Max tokens in 3.5 is lower. 
With the current number get this error: Service call failed for role fallback (Provider: anthropic, Model: claude-3-5-sonnet-20240620): max_tokens: 120000 > 8192, which is the maximum allowed number of output tokens for claude-3-5-sonnet-20240620 * Fix fallback model ID format and update maxTokens in Taskmaster configuration --------- Co-authored-by: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com> --- .changeset/ten-ways-mate.md | 5 +++++ assets/.taskmasterconfig | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 .changeset/ten-ways-mate.md diff --git a/.changeset/ten-ways-mate.md b/.changeset/ten-ways-mate.md new file mode 100644 index 00000000..993d6e2d --- /dev/null +++ b/.changeset/ten-ways-mate.md @@ -0,0 +1,5 @@ +--- +'task-master-ai': patch +--- + +Fix default fallback model and maxTokens in Taskmaster initialization diff --git a/assets/.taskmasterconfig b/assets/.taskmasterconfig index 0b874da5..2e015bf6 100644 --- a/assets/.taskmasterconfig +++ b/assets/.taskmasterconfig @@ -14,8 +14,8 @@ }, "fallback": { "provider": "anthropic", - "modelId": "claude-3.5-sonnet-20240620", - "maxTokens": 120000, + "modelId": "claude-3-5-sonnet-20240620", + "maxTokens": 8192, "temperature": 0.1 } }, From 17294ff25918d64278674e558698a1a9ad785098 Mon Sep 17 00:00:00 2001 From: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com> Date: Thu, 15 May 2025 22:41:16 +0200 Subject: [PATCH 09/12] Fix: Correct version resolution for banner and update check (#511) * Fix: Correct version resolution for banner and update check Resolves issues where the tool's version was displayed as 'unknown'. - Modified 'displayBanner' in 'ui.js' and 'checkForUpdate' in 'commands.js' to read package.json relative to their own script locations using import.meta.url. - This ensures the correct local version is identified for both the main banner display and the update notification mechanism. - Restored a missing closing brace in 'ui.js' to fix a SyntaxError. 
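The failure mode the bullets above describe comes down to where package.json is resolved from. A short sketch of the difference, with illustrative relative paths:

```js
import path from 'path';
import { fileURLToPath } from 'url';

// Fragile: depends on the directory the user happens to invoke the CLI
// from, so a globally installed tool reads the wrong (or no) manifest.
const cwdManifest = path.join(process.cwd(), 'package.json');

// Robust: anchored to this module's own location on disk, so it always
// resolves the tool's own package.json regardless of invocation dir.
const here = path.dirname(fileURLToPath(import.meta.url));
const ownManifest = path.join(here, '..', '..', 'package.json');
```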
* fix: refactor and cleanup * fix: chores and cleanup and testing * chore: cleanup * fix: add changeset --------- Co-authored-by: Christer Soederlund --- .changeset/many-wasps-sell.md | 5 +++++ .changeset/pre.json | 4 ++-- package-lock.json | 4 ++-- package.json | 2 +- scripts/modules/commands.js | 30 +++--------------------------- scripts/modules/ui.js | 28 ++++------------------------ src/utils/getVersion.js | 35 +++++++++++++++++++++++++++++++++++ 7 files changed, 52 insertions(+), 56 deletions(-) create mode 100644 .changeset/many-wasps-sell.md create mode 100644 src/utils/getVersion.js diff --git a/.changeset/many-wasps-sell.md b/.changeset/many-wasps-sell.md new file mode 100644 index 00000000..75631311 --- /dev/null +++ b/.changeset/many-wasps-sell.md @@ -0,0 +1,5 @@ +--- +'task-master-ai': patch +--- + +Task Master no longer tells you to update when you're already up to date diff --git a/.changeset/pre.json b/.changeset/pre.json index 0d16f4e6..e8be1d58 100644 --- a/.changeset/pre.json +++ b/.changeset/pre.json @@ -1,8 +1,8 @@ { - "mode": "pre", + "mode": "exit", "tag": "rc", "initialVersions": { - "task-master-ai": "0.13.1" + "task-master-ai": "0.13.2" }, "changesets": [ "beige-doodles-type", diff --git a/package-lock.json b/package-lock.json index 342dd287..ff03b4e2 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "task-master-ai", - "version": "0.13.2-rc.1", + "version": "0.12", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "task-master-ai", - "version": "0.13.2-rc.1", + "version": "0.12", "license": "MIT WITH Commons-Clause", "dependencies": { "@ai-sdk/anthropic": "^1.2.10", diff --git a/package.json b/package.json index a9ef850d..039bcf41 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "task-master-ai", - "version": "0.13.2-rc.1", + "version": "0.13.2", "description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.", "main": "index.js", "type": "module", diff --git a/scripts/modules/commands.js b/scripts/modules/commands.js index 5d62b5e7..78c9c9de 100644 --- a/scripts/modules/commands.js +++ b/scripts/modules/commands.js @@ -73,7 +73,7 @@ import { getApiKeyStatusReport } from './task-manager/models.js'; import { findProjectRoot } from './utils.js'; - +import { getTaskMasterVersion } from '../../src/utils/getVersion.js'; /** * Runs the interactive setup process for model configuration. * @param {string|null} projectRoot - The resolved project root directory. @@ -1273,10 +1273,6 @@ function registerCommands(programInstance) { '--details
', 'Implementation details (for manual task creation)' ) - .option( - '--test-strategy ', - 'Test strategy (for manual task creation)' - ) .option( '--dependencies ', 'Comma-separated list of task IDs this task depends on' @@ -1658,6 +1654,7 @@ function registerCommands(programInstance) { } } catch (error) { console.error(chalk.red(`Error: ${error.message}`)); + showAddSubtaskHelp(); process.exit(1); } }) @@ -2381,28 +2378,7 @@ function setupCLI() { */ async function checkForUpdate() { // Get current version from package.json ONLY - let currentVersion = 'unknown'; // Initialize with a default - try { - // Try to get the version from the installed package (if applicable) or current dir - let packageJsonPath = path.join( - process.cwd(), - 'node_modules', - 'task-master-ai', - 'package.json' - ); - // Fallback to current directory package.json if not found in node_modules - if (!fs.existsSync(packageJsonPath)) { - packageJsonPath = path.join(process.cwd(), 'package.json'); - } - - if (fs.existsSync(packageJsonPath)) { - const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8')); - currentVersion = packageJson.version; - } - } catch (error) { - // Silently fail and use default - log('debug', `Error reading current package version: ${error.message}`); - } + const currentVersion = getTaskMasterVersion(); return new Promise((resolve) => { // Get the latest version from npm registry diff --git a/scripts/modules/ui.js b/scripts/modules/ui.js index 975a9055..e6ea4c54 100644 --- a/scripts/modules/ui.js +++ b/scripts/modules/ui.js @@ -16,10 +16,10 @@ import { truncate, isSilentMode } from './utils.js'; -import path from 'path'; import fs from 'fs'; import { findNextTask, analyzeTaskComplexity } from './task-manager.js'; import { getProjectName, getDefaultSubtasks } from './config-manager.js'; +import { getTaskMasterVersion } from '../../src/utils/getVersion.js'; // Create a color gradient for the banner const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']); @@ -46,17 +46,7 @@ function displayBanner() { ); // Read version directly from package.json - let version = 'unknown'; // Initialize with a default - try { - const packageJsonPath = path.join(process.cwd(), 'package.json'); - if (fs.existsSync(packageJsonPath)) { - const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8')); - version = packageJson.version; - } - } catch (error) { - // Silently fall back to default version - log('warn', 'Could not read package.json for version info.'); - } + const version = getTaskMasterVersion(); console.log( boxen( @@ -809,12 +799,7 @@ async function displayNextTask(tasksPath) { 'padding-bottom': 0, compact: true }, - chars: { - mid: '', - 'left-mid': '', - 'mid-mid': '', - 'right-mid': '' - }, + chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' }, colWidths: [15, Math.min(75, process.stdout.columns - 20 || 60)], wordWrap: true }); @@ -902,12 +887,7 @@ async function displayNextTask(tasksPath) { 'padding-bottom': 0, compact: true }, - chars: { - mid: '', - 'left-mid': '', - 'mid-mid': '', - 'right-mid': '' - }, + chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' }, wordWrap: true }); diff --git a/src/utils/getVersion.js b/src/utils/getVersion.js new file mode 100644 index 00000000..55a64f40 --- /dev/null +++ b/src/utils/getVersion.js @@ -0,0 +1,35 @@ +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; +import { log } from '../../scripts/modules/utils.js'; + +/** + * Reads the version from the nearest 
package.json relative to this file. + * Returns 'unknown' if not found or on error. + * @returns {string} The version string or 'unknown'. + */ +export function getTaskMasterVersion() { + let version = 'unknown'; + try { + // Get the directory of the current module (getPackageVersion.js) + const currentModuleFilename = fileURLToPath(import.meta.url); + const currentModuleDirname = path.dirname(currentModuleFilename); + // Construct the path to package.json relative to this file (../../package.json) + const packageJsonPath = path.join( + currentModuleDirname, + '..', + '..', + 'package.json' + ); + + if (fs.existsSync(packageJsonPath)) { + const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); + const packageJson = JSON.parse(packageJsonContent); + version = packageJson.version; + } + } catch (error) { + // Silently fall back to default version + log('warn', 'Could not read own package.json for version info.', error); + } + return version; +} From e96734a6cc6fec7731de72eb46b182a6e3743d02 Mon Sep 17 00:00:00 2001 From: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com> Date: Thu, 15 May 2025 22:56:52 +0200 Subject: [PATCH 10/12] fix: updateTask enableSilentMode is not defined (#517) - Closes #412 --- .changeset/tricky-wombats-spend.md | 5 +++++ mcp-server/src/core/direct-functions/update-tasks.js | 4 ++++ 2 files changed, 9 insertions(+) create mode 100644 .changeset/tricky-wombats-spend.md diff --git a/.changeset/tricky-wombats-spend.md b/.changeset/tricky-wombats-spend.md new file mode 100644 index 00000000..2b961c9c --- /dev/null +++ b/.changeset/tricky-wombats-spend.md @@ -0,0 +1,5 @@ +--- +'task-master-ai': patch +--- + +Fix bug when updating tasks on the MCP server (#412) diff --git a/mcp-server/src/core/direct-functions/update-tasks.js b/mcp-server/src/core/direct-functions/update-tasks.js index 3e485ae4..0b83a5dd 100644 --- a/mcp-server/src/core/direct-functions/update-tasks.js +++ b/mcp-server/src/core/direct-functions/update-tasks.js @@ -6,6 +6,10 @@ import path from 'path'; import { updateTasks } from '../../../../scripts/modules/task-manager.js'; import { createLogWrapper } from '../../tools/utils.js'; +import { + enableSilentMode, + disableSilentMode +} from '../../../../scripts/modules/utils.js'; /** * Direct function wrapper for updating tasks based on new context. 
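The fix above imports the helpers that the direct function already referenced. The usual call pattern in these MCP direct-function wrappers looks roughly like this (a sketch, assuming the utils toggle console output around the core call; the argument mapping is elided):

```js
import {
	enableSilentMode,
	disableSilentMode
} from '../../../../scripts/modules/utils.js';

async function updateTasksDirect(args, log) {
	enableSilentMode(); // keep core-module console noise out of MCP responses
	try {
		return await updateTasks(/* ...args mapped from the MCP tool... */);
	} finally {
		disableSilentMode(); // always restore, even if updateTasks throws
	}
}
```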
From ed17cb0e0a04dedde6c616f68f24f3660f68dd04 Mon Sep 17 00:00:00 2001 From: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com> Date: Fri, 16 May 2025 15:34:29 +0200 Subject: [PATCH 11/12] feat: implement baseUrls on all ai providers(#521) --- .changeset/forty-plums-stay.md | 5 ++ docs/configuration.md | 11 +++-- package-lock.json | 4 +- scripts/modules/ai-services-unified.js | 13 ++++- scripts/modules/config-manager.js | 8 +++ src/ai-providers/anthropic.js | 46 ++++++++---------- src/ai-providers/google.js | 67 ++++++++++++-------------- src/ai-providers/openai.js | 40 ++++++++------- src/ai-providers/openrouter.js | 20 ++++++-- src/ai-providers/perplexity.js | 25 ++++++---- src/ai-providers/xai.js | 37 ++++++++------ tests/unit/ai-services-unified.test.js | 4 +- 12 files changed, 161 insertions(+), 119 deletions(-) create mode 100644 .changeset/forty-plums-stay.md diff --git a/.changeset/forty-plums-stay.md b/.changeset/forty-plums-stay.md new file mode 100644 index 00000000..d49e0653 --- /dev/null +++ b/.changeset/forty-plums-stay.md @@ -0,0 +1,5 @@ +--- +'task-master-ai': minor +--- + +.taskmasterconfig now supports a baseUrl field per model role (main, research, fallback), allowing endpoint overrides for any provider. diff --git a/docs/configuration.md b/docs/configuration.md index f1e57560..615e184f 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -15,13 +15,15 @@ Taskmaster uses two primary methods for configuration: "provider": "anthropic", "modelId": "claude-3-7-sonnet-20250219", "maxTokens": 64000, - "temperature": 0.2 + "temperature": 0.2, + "baseUrl": "https://api.anthropic.com/v1" }, "research": { "provider": "perplexity", "modelId": "sonar-pro", "maxTokens": 8700, - "temperature": 0.1 + "temperature": 0.1, + "baseUrl": "https://api.perplexity.ai/v1" }, "fallback": { "provider": "anthropic", @@ -56,8 +58,9 @@ Taskmaster uses two primary methods for configuration: - `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`). - `OPENROUTER_API_KEY`: Your OpenRouter API key. - `XAI_API_KEY`: Your X-AI API key. - - **Optional Endpoint Overrides (in .taskmasterconfig):** - - `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key. + - **Optional Endpoint Overrides:** + - **Per-role `baseUrl` in `.taskmasterconfig`:** You can add a `baseUrl` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used. + - `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseUrl` for the Azure model role). - `OLLAMA_BASE_URL`: Override the default Ollama API URL (Default: `http://localhost:11434/api`). **Important:** Settings like model ID selections (`main`, `research`, `fallback`), `maxTokens`, `temperature`, `logLevel`, `defaultSubtasks`, `defaultPriority`, and `projectName` are **managed in `.taskmasterconfig`**, not environment variables. 
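Putting the pieces above together, this is roughly how a per-role `baseUrl` flows from `.taskmasterconfig` into a provider client; `apiKey` and `projectRoot` are assumed to be in scope, and the import paths are illustrative:

```js
import { createAnthropic } from '@ai-sdk/anthropic';
import { getBaseUrlForRole } from './scripts/modules/config-manager.js';

// Resolve the optional per-role override from .taskmasterconfig;
// returns undefined when the role has no `baseUrl` set.
const baseUrl = getBaseUrlForRole('main', projectRoot);

// The conditional spread means each provider keeps its standard
// endpoint unless an override is actually present.
const client = createAnthropic({
	apiKey,
	...(baseUrl && { baseURL: baseUrl })
});
```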
diff --git a/package-lock.json b/package-lock.json index ff03b4e2..2a437a53 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "task-master-ai", - "version": "0.12", + "version": "0.13.2", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "task-master-ai", - "version": "0.12", + "version": "0.13.2", "license": "MIT WITH Commons-Clause", "dependencies": { "@ai-sdk/anthropic": "^1.2.10", diff --git a/scripts/modules/ai-services-unified.js b/scripts/modules/ai-services-unified.js index fead4ad3..da958986 100644 --- a/scripts/modules/ai-services-unified.js +++ b/scripts/modules/ai-services-unified.js @@ -14,7 +14,8 @@ import { getResearchModelId, getFallbackProvider, getFallbackModelId, - getParametersForRole + getParametersForRole, + getBaseUrlForRole } from './config-manager.js'; import { log, resolveEnvVariable, findProjectRoot } from './utils.js'; @@ -284,7 +285,13 @@ async function _unifiedServiceRunner(serviceType, params) { 'AI service call failed for all configured roles.'; for (const currentRole of sequence) { - let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn; + let providerName, + modelId, + apiKey, + roleParams, + providerFnSet, + providerApiFn, + baseUrl; try { log('info', `New AI service call with role: ${currentRole}`); @@ -325,6 +332,7 @@ async function _unifiedServiceRunner(serviceType, params) { // Pass effectiveProjectRoot to getParametersForRole roleParams = getParametersForRole(currentRole, effectiveProjectRoot); + baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot); // 2. Get Provider Function Set providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()]; @@ -401,6 +409,7 @@ async function _unifiedServiceRunner(serviceType, params) { maxTokens: roleParams.maxTokens, temperature: roleParams.temperature, messages, + baseUrl, ...(serviceType === 'generateObject' && { schema, objectName }), ...restApiParams }; diff --git a/scripts/modules/config-manager.js b/scripts/modules/config-manager.js index e9302d08..a4ed94e5 100644 --- a/scripts/modules/config-manager.js +++ b/scripts/modules/config-manager.js @@ -677,6 +677,13 @@ function getAllProviders() { return Object.keys(MODEL_MAP || {}); } +function getBaseUrlForRole(role, explicitRoot = null) { + const roleConfig = getModelConfigForRole(role, explicitRoot); + return roleConfig && typeof roleConfig.baseUrl === 'string' + ? roleConfig.baseUrl + : undefined; +} + export { // Core config access getConfig, @@ -704,6 +711,7 @@ export { getFallbackModelId, getFallbackMaxTokens, getFallbackTemperature, + getBaseUrlForRole, // Global setting getters (No env var overrides) getLogLevel, diff --git a/src/ai-providers/anthropic.js b/src/ai-providers/anthropic.js index 1fa36f3d..27602757 100644 --- a/src/ai-providers/anthropic.js +++ b/src/ai-providers/anthropic.js @@ -5,7 +5,7 @@ * using the Vercel AI SDK. 
*/ import { createAnthropic } from '@ai-sdk/anthropic'; -import { generateText, streamText, generateObject, streamObject } from 'ai'; +import { generateText, streamText, generateObject } from 'ai'; import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible // TODO: Implement standardized functions for generateText, streamText, generateObject @@ -17,7 +17,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils is acces // Remove the global variable and caching logic // let anthropicClient; -function getClient(apiKey) { +function getClient(apiKey, baseUrl) { if (!apiKey) { // In a real scenario, this would use the config resolver. // Throwing error here if key isn't passed for simplicity. @@ -30,14 +30,12 @@ function getClient(apiKey) { // Create and return a new instance directly with standard version header return createAnthropic({ apiKey: apiKey, - baseURL: 'https://api.anthropic.com/v1', + ...(baseUrl && { baseURL: baseUrl }), // Use standard version header instead of beta headers: { 'anthropic-beta': 'output-128k-2025-02-19' } }); - // } - // return anthropicClient; } // --- Standardized Service Function Implementations --- @@ -51,6 +49,7 @@ function getClient(apiKey) { * @param {Array} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]). * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. + * @param {string} [params.baseUrl] - The base URL for the Anthropic API. * @returns {Promise} The generated text content. * @throws {Error} If the API call fails. */ @@ -59,11 +58,12 @@ export async function generateAnthropicText({ modelId, messages, maxTokens, - temperature + temperature, + baseUrl }) { log('debug', `Generating Anthropic text with model: ${modelId}`); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); const result = await generateText({ model: client(modelId), messages: messages, @@ -93,6 +93,7 @@ export async function generateAnthropicText({ * @param {Array} params.messages - The messages array. * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. + * @param {string} [params.baseUrl] - The base URL for the Anthropic API. * @returns {Promise} The full stream result object from the Vercel AI SDK. * @throws {Error} If the API call fails to initiate the stream. 
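 * @example
 * // Hypothetical usage sketch (apiKey and messages defined elsewhere);
 * // omitting `baseUrl` keeps the default Anthropic endpoint.
 * const stream = await streamAnthropicText({
 * 	apiKey,
 * 	modelId: 'claude-3-7-sonnet-20250219',
 * 	messages,
 * 	maxTokens: 64000,
 * 	temperature: 0.2,
 * 	baseUrl: 'https://api.anthropic.com/v1'
 * });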
*/ @@ -101,20 +102,20 @@ export async function streamAnthropicText({ modelId, messages, maxTokens, - temperature + temperature, + baseUrl }) { log('debug', `Streaming Anthropic text with model: ${modelId}`); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); - // --- DEBUG LOGGING --- >> log( 'debug', '[streamAnthropicText] Parameters received by streamText:', JSON.stringify( { - modelId: modelId, // Log modelId being used - messages: messages, // Log the messages array + modelId: modelId, + messages: messages, maxTokens: maxTokens, temperature: temperature }, @@ -122,25 +123,19 @@ export async function streamAnthropicText({ 2 ) ); - // --- << DEBUG LOGGING --- const stream = await streamText({ model: client(modelId), messages: messages, maxTokens: maxTokens, temperature: temperature - // Beta header moved to client initialization // TODO: Add other relevant parameters }); // *** RETURN THE FULL STREAM OBJECT, NOT JUST stream.textStream *** return stream; } catch (error) { - log( - 'error', - `Anthropic streamText failed: ${error.message}`, - error.stack // Log stack trace for more details - ); + log('error', `Anthropic streamText failed: ${error.message}`, error.stack); throw error; } } @@ -160,6 +155,7 @@ export async function streamAnthropicText({ * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.maxRetries] - Max retries for validation/generation. + * @param {string} [params.baseUrl] - The base URL for the Anthropic API. * @returns {Promise} The generated object matching the schema. * @throws {Error} If generation or validation fails. */ @@ -171,24 +167,22 @@ export async function generateAnthropicObject({ objectName = 'generated_object', maxTokens, temperature, - maxRetries = 3 + maxRetries = 3, + baseUrl }) { log( 'debug', `Generating Anthropic object ('${objectName}') with model: ${modelId}` ); try { - const client = getClient(apiKey); - - // Log basic debug info + const client = getClient(apiKey, baseUrl); log( 'debug', `Using maxTokens: ${maxTokens}, temperature: ${temperature}, model: ${modelId}` ); - const result = await generateObject({ model: client(modelId), - mode: 'tool', // Anthropic generally uses 'tool' mode for structured output + mode: 'tool', schema: schema, messages: messages, tool: { @@ -199,14 +193,12 @@ export async function generateAnthropicObject({ temperature: temperature, maxRetries: maxRetries }); - log( 'debug', `Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}` ); return result.object; } catch (error) { - // Simple error logging log( 'error', `Anthropic generateObject ('${objectName}') failed: ${error.message}` diff --git a/src/ai-providers/google.js b/src/ai-providers/google.js index 037f9a3c..7428816b 100644 --- a/src/ai-providers/google.js +++ b/src/ai-providers/google.js @@ -12,6 +12,16 @@ import { log } from '../../scripts/modules/utils.js'; // Import logging utility const DEFAULT_MODEL = 'gemini-2.0-pro'; // Or a suitable default const DEFAULT_TEMPERATURE = 0.2; // Or a suitable default +function getClient(apiKey, baseUrl) { + if (!apiKey) { + throw new Error('Google API key is required.'); + } + return createGoogleGenerativeAI({ + apiKey: apiKey, + ...(baseUrl && { baseURL: baseUrl }) + }); +} + /** * Generates text using a Google AI model. 
* @@ -29,7 +39,8 @@ async function generateGoogleText({ modelId = DEFAULT_MODEL, temperature = DEFAULT_TEMPERATURE, messages, - maxTokens // Note: Vercel SDK might handle this differently, needs verification + maxTokens, + baseUrl }) { if (!apiKey) { throw new Error('Google API key is required.'); @@ -37,28 +48,21 @@ async function generateGoogleText({ log('info', `Generating text with Google model: ${modelId}`); try { - // const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation - const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation - // const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval - const model = googleProvider(modelId); // Correct model retrieval - - // Construct payload suitable for Vercel SDK's generateText - // Note: The exact structure might depend on how messages are passed + const googleProvider = getClient(apiKey, baseUrl); + const model = googleProvider(modelId); const result = await generateText({ - model, // Pass the model instance - messages, // Pass the messages array directly + model, + messages, temperature, - maxOutputTokens: maxTokens // Map to correct Vercel SDK param if available + maxOutputTokens: maxTokens }); - - // Assuming result structure provides text directly or within a property - return result.text; // Adjust based on actual SDK response + return result.text; } catch (error) { log( 'error', `Error generating text with Google (${modelId}): ${error.message}` ); - throw error; // Re-throw for unified service handler + throw error; } } @@ -79,7 +83,8 @@ async function streamGoogleText({ modelId = DEFAULT_MODEL, temperature = DEFAULT_TEMPERATURE, messages, - maxTokens + maxTokens, + baseUrl }) { if (!apiKey) { throw new Error('Google API key is required.'); @@ -87,19 +92,15 @@ async function streamGoogleText({ log('info', `Streaming text with Google model: ${modelId}`); try { - // const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation - const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation - // const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval - const model = googleProvider(modelId); // Correct model retrieval - + const googleProvider = getClient(apiKey, baseUrl); + const model = googleProvider(modelId); const stream = await streamText({ - model, // Pass the model instance + model, messages, temperature, maxOutputTokens: maxTokens }); - - return stream; // Return the stream directly + return stream; } catch (error) { log( 'error', @@ -130,7 +131,8 @@ async function generateGoogleObject({ messages, schema, objectName, // Note: Vercel SDK might use this differently or not at all - maxTokens + maxTokens, + baseUrl }) { if (!apiKey) { throw new Error('Google API key is required.'); @@ -138,23 +140,16 @@ async function generateGoogleObject({ log('info', `Generating object with Google model: ${modelId}`); try { - // const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation - const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation - // const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval - const model = googleProvider(modelId); // Correct model retrieval - + const googleProvider = getClient(apiKey, baseUrl); + const model = googleProvider(modelId); const { object } = await generateObject({ - model, // Pass the model instance + model, schema, messages, temperature, maxOutputTokens: maxTokens - // 
Note: 'objectName' or 'mode' might not be directly applicable here - // depending on how `@ai-sdk/google` handles `generateObject`. - // Check SDK docs if specific tool calling/JSON mode needs explicit setup. }); - - return object; // Return the parsed object + return object; } catch (error) { log( 'error', diff --git a/src/ai-providers/openai.js b/src/ai-providers/openai.js index ce34e957..3a0f2090 100644 --- a/src/ai-providers/openai.js +++ b/src/ai-providers/openai.js @@ -1,16 +1,26 @@ -import { createOpenAI, openai } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK -import { generateText, streamText, generateObject } from 'ai'; // Import necessary functions from 'ai' +import { createOpenAI } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK +import { generateObject } from 'ai'; // Import necessary functions from 'ai' import { log } from '../../scripts/modules/utils.js'; +function getClient(apiKey, baseUrl) { + if (!apiKey) { + throw new Error('OpenAI API key is required.'); + } + return createOpenAI({ + apiKey: apiKey, + ...(baseUrl && { baseURL: baseUrl }) + }); +} + /** * Generates text using OpenAI models via Vercel AI SDK. * - * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature. + * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl. * @returns {Promise} The generated text content. * @throws {Error} If API call fails. */ export async function generateOpenAIText(params) { - const { apiKey, modelId, messages, maxTokens, temperature } = params; + const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params; log('debug', `generateOpenAIText called with model: ${modelId}`); if (!apiKey) { @@ -23,18 +33,15 @@ export async function generateOpenAIText(params) { throw new Error('Invalid or empty messages array provided for OpenAI.'); } - const openaiClient = createOpenAI({ apiKey }); + const openaiClient = getClient(apiKey, baseUrl); try { const result = await openaiClient.chat(messages, { - // Updated: Use openaiClient.chat directly model: modelId, max_tokens: maxTokens, temperature }); - // Adjust based on actual Vercel SDK response structure for openaiClient.chat - // This might need refinement based on testing the SDK's output. const textContent = result?.choices?.[0]?.message?.content?.trim(); if (!textContent) { @@ -65,12 +72,12 @@ export async function generateOpenAIText(params) { /** * Streams text using OpenAI models via Vercel AI SDK. * - * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature. + * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl. * @returns {Promise} A readable stream of text deltas. * @throws {Error} If API call fails. 
*/ export async function streamOpenAIText(params) { - const { apiKey, modelId, messages, maxTokens, temperature } = params; + const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params; log('debug', `streamOpenAIText called with model: ${modelId}`); if (!apiKey) { @@ -85,12 +92,10 @@ export async function streamOpenAIText(params) { ); } - const openaiClient = createOpenAI({ apiKey }); + const openaiClient = getClient(apiKey, baseUrl); try { - // Use the streamText function from Vercel AI SDK core const stream = await openaiClient.chat.stream(messages, { - // Updated: Use openaiClient.chat.stream model: modelId, max_tokens: maxTokens, temperature @@ -100,7 +105,6 @@ export async function streamOpenAIText(params) { 'debug', `OpenAI streamText initiated successfully for model: ${modelId}` ); - // The Vercel SDK's streamText should directly return the stream object return stream; } catch (error) { log( @@ -117,7 +121,7 @@ export async function streamOpenAIText(params) { /** * Generates structured objects using OpenAI models via Vercel AI SDK. * - * @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature. + * @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature, baseUrl. * @returns {Promise} The generated object matching the schema. * @throws {Error} If API call fails or object generation fails. */ @@ -129,7 +133,8 @@ export async function generateOpenAIObject(params) { schema, objectName, maxTokens, - temperature + temperature, + baseUrl } = params; log( 'debug', @@ -145,10 +150,9 @@ export async function generateOpenAIObject(params) { if (!objectName) throw new Error('Object name is required for OpenAI object generation.'); - const openaiClient = createOpenAI({ apiKey }); + const openaiClient = getClient(apiKey, baseUrl); try { - // Use the imported generateObject function from 'ai' package const result = await generateObject({ model: openaiClient(modelId), schema: schema, diff --git a/src/ai-providers/openrouter.js b/src/ai-providers/openrouter.js index 594d208c..f842cbf2 100644 --- a/src/ai-providers/openrouter.js +++ b/src/ai-providers/openrouter.js @@ -2,6 +2,14 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider'; import { generateText, streamText, generateObject } from 'ai'; import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in scripts/modules +function getClient(apiKey, baseUrl) { + if (!apiKey) throw new Error('OpenRouter API key is required.'); + return createOpenRouter({ + apiKey, + ...(baseUrl && { baseURL: baseUrl }) + }); +} + /** * Generates text using an OpenRouter chat model. * @@ -11,6 +19,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in * @param {Array} params.messages - Array of message objects (system, user, assistant). * @param {number} [params.maxTokens] - Maximum tokens to generate. * @param {number} [params.temperature] - Sampling temperature. + * @param {string} [params.baseUrl] - Base URL for the OpenRouter API. * @returns {Promise} The generated text content. * @throws {Error} If the API call fails. 
*/ @@ -20,6 +29,7 @@ async function generateOpenRouterText({ messages, maxTokens, temperature, + baseUrl, ...rest // Capture any other Vercel AI SDK compatible parameters }) { if (!apiKey) throw new Error('OpenRouter API key is required.'); @@ -28,7 +38,7 @@ async function generateOpenRouterText({ throw new Error('Messages array cannot be empty.'); try { - const openrouter = createOpenRouter({ apiKey }); + const openrouter = getClient(apiKey, baseUrl); const model = openrouter.chat(modelId); // Assuming chat model const { text } = await generateText({ @@ -58,6 +68,7 @@ async function generateOpenRouterText({ * @param {Array} params.messages - Array of message objects (system, user, assistant). * @param {number} [params.maxTokens] - Maximum tokens to generate. * @param {number} [params.temperature] - Sampling temperature. + * @param {string} [params.baseUrl] - Base URL for the OpenRouter API. * @returns {Promise>} A readable stream of text deltas. * @throws {Error} If the API call fails. */ @@ -67,6 +78,7 @@ async function streamOpenRouterText({ messages, maxTokens, temperature, + baseUrl, ...rest }) { if (!apiKey) throw new Error('OpenRouter API key is required.'); @@ -75,7 +87,7 @@ async function streamOpenRouterText({ throw new Error('Messages array cannot be empty.'); try { - const openrouter = createOpenRouter({ apiKey }); + const openrouter = getClient(apiKey, baseUrl); const model = openrouter.chat(modelId); // Directly return the stream from the Vercel AI SDK function @@ -108,6 +120,7 @@ async function streamOpenRouterText({ * @param {number} [params.maxRetries=3] - Max retries for object generation. * @param {number} [params.maxTokens] - Maximum tokens. * @param {number} [params.temperature] - Temperature. + * @param {string} [params.baseUrl] - Base URL for the OpenRouter API. * @returns {Promise} The generated object matching the schema. * @throws {Error} If the API call fails or validation fails. */ @@ -120,6 +133,7 @@ async function generateOpenRouterObject({ maxRetries = 3, maxTokens, temperature, + baseUrl, ...rest }) { if (!apiKey) throw new Error('OpenRouter API key is required.'); @@ -129,7 +143,7 @@ async function generateOpenRouterObject({ throw new Error('Messages array cannot be empty.'); try { - const openrouter = createOpenRouter({ apiKey }); + const openrouter = getClient(apiKey, baseUrl); const model = openrouter.chat(modelId); const { object } = await generateObject({ diff --git a/src/ai-providers/perplexity.js b/src/ai-providers/perplexity.js index e8982d6f..7255753d 100644 --- a/src/ai-providers/perplexity.js +++ b/src/ai-providers/perplexity.js @@ -10,13 +10,13 @@ import { log } from '../../scripts/modules/utils.js'; // --- Client Instantiation --- // Similar to Anthropic, this expects the resolved API key to be passed in. -function getClient(apiKey) { +function getClient(apiKey, baseUrl) { if (!apiKey) { throw new Error('Perplexity API key is required.'); } - // Create and return a new instance directly return createPerplexity({ - apiKey: apiKey + apiKey: apiKey, + ...(baseUrl && { baseURL: baseUrl }) }); } @@ -31,6 +31,7 @@ function getClient(apiKey) { * @param {Array} params.messages - The messages array. * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. + * @param {string} [params.baseUrl] - Base URL for the Perplexity API. * @returns {Promise} The generated text content. * @throws {Error} If the API call fails. 
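 * @example
 * // Hypothetical usage sketch; with no `baseUrl`, the provider's
 * // standard endpoint is used.
 * const text = await generatePerplexityText({
 * 	apiKey,
 * 	modelId: 'sonar-pro',
 * 	messages,
 * 	maxTokens: 8700,
 * 	temperature: 0.1
 * });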
*/ @@ -39,11 +40,12 @@ export async function generatePerplexityText({ modelId, messages, maxTokens, - temperature + temperature, + baseUrl }) { log('debug', `Generating Perplexity text with model: ${modelId}`); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); const result = await generateText({ model: client(modelId), messages: messages, @@ -70,6 +72,7 @@ export async function generatePerplexityText({ * @param {Array} params.messages - The messages array. * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. + * @param {string} [params.baseUrl] - Base URL for the Perplexity API. * @returns {Promise} The full stream result object from the Vercel AI SDK. * @throws {Error} If the API call fails to initiate the stream. */ @@ -78,11 +81,12 @@ export async function streamPerplexityText({ modelId, messages, maxTokens, - temperature + temperature, + baseUrl }) { log('debug', `Streaming Perplexity text with model: ${modelId}`); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); const stream = await streamText({ model: client(modelId), messages: messages, @@ -112,6 +116,7 @@ export async function streamPerplexityText({ * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.maxRetries] - Max retries for validation/generation. + * @param {string} [params.baseUrl] - Base URL for the Perplexity API. * @returns {Promise} The generated object matching the schema. * @throws {Error} If generation or validation fails or is unsupported. */ @@ -123,7 +128,8 @@ export async function generatePerplexityObject({ objectName = 'generated_object', maxTokens, temperature, - maxRetries = 1 // Lower retries as support might be limited + maxRetries = 1, + baseUrl }) { log( 'debug', @@ -134,8 +140,7 @@ export async function generatePerplexityObject({ 'generateObject support for Perplexity might be limited or experimental.' ); try { - const client = getClient(apiKey); - // Attempt using generateObject, but be prepared for potential issues + const client = getClient(apiKey, baseUrl); const result = await generateObject({ model: client(modelId), schema: schema, diff --git a/src/ai-providers/xai.js b/src/ai-providers/xai.js index 1886e787..fa2cc954 100644 --- a/src/ai-providers/xai.js +++ b/src/ai-providers/xai.js @@ -9,14 +9,13 @@ import { generateText, streamText, generateObject } from 'ai'; // Only import wh import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible // --- Client Instantiation --- -function getClient(apiKey) { +function getClient(apiKey, baseUrl) { if (!apiKey) { throw new Error('xAI API key is required.'); } - // Create and return a new instance directly return createXai({ - apiKey: apiKey - // Add baseURL or other options if needed later + apiKey: apiKey, + ...(baseUrl && { baseURL: baseUrl }) }); } @@ -31,6 +30,7 @@ function getClient(apiKey) { * @param {Array} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]). * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. + * @param {string} [params.baseUrl] - The base URL for the xAI API. * @returns {Promise} The generated text content. * @throws {Error} If the API call fails. 
*/ @@ -39,13 +39,14 @@ export async function generateXaiText({ modelId, messages, maxTokens, - temperature + temperature, + baseUrl }) { log('debug', `Generating xAI text with model: ${modelId}`); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); const result = await generateText({ - model: client(modelId), // Correct model invocation + model: client(modelId), messages: messages, maxTokens: maxTokens, temperature: temperature @@ -70,6 +71,7 @@ export async function generateXaiText({ * @param {Array} params.messages - The messages array. * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. + * @param {string} [params.baseUrl] - The base URL for the xAI API. * @returns {Promise} The full stream result object from the Vercel AI SDK. * @throws {Error} If the API call fails to initiate the stream. */ @@ -78,18 +80,19 @@ export async function streamXaiText({ modelId, messages, maxTokens, - temperature + temperature, + baseUrl }) { log('debug', `Streaming xAI text with model: ${modelId}`); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); const stream = await streamText({ - model: client(modelId), // Correct model invocation + model: client(modelId), messages: messages, maxTokens: maxTokens, temperature: temperature }); - return stream; // Return the full stream object + return stream; } catch (error) { log('error', `xAI streamText failed: ${error.message}`, error.stack); throw error; @@ -110,6 +113,7 @@ export async function streamXaiText({ * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.maxRetries] - Max retries for validation/generation. + * @param {string} [params.baseUrl] - The base URL for the xAI API. * @returns {Promise} The generated object matching the schema. * @throws {Error} If generation or validation fails. */ @@ -121,16 +125,17 @@ export async function generateXaiObject({ objectName = 'generated_xai_object', maxTokens, temperature, - maxRetries = 3 + maxRetries = 3, + baseUrl }) { log( - 'warn', // Log warning as this is likely unsupported + 'warn', `Attempting to generate xAI object ('${objectName}') with model: ${modelId}. This may not be supported by the provider.` ); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); const result = await generateObject({ - model: client(modelId), // Correct model invocation + model: client(modelId), // Note: mode might need adjustment if xAI ever supports object generation differently mode: 'tool', schema: schema, @@ -153,6 +158,6 @@ export async function generateXaiObject({ 'error', `xAI generateObject ('${objectName}') failed: ${error.message}. 
(Likely unsupported by provider)` ); - throw error; // Re-throw the error + throw error; } } diff --git a/tests/unit/ai-services-unified.test.js b/tests/unit/ai-services-unified.test.js index 59e3d32b..4098e75e 100644 --- a/tests/unit/ai-services-unified.test.js +++ b/tests/unit/ai-services-unified.test.js @@ -8,6 +8,7 @@ const mockGetResearchModelId = jest.fn(); const mockGetFallbackProvider = jest.fn(); const mockGetFallbackModelId = jest.fn(); const mockGetParametersForRole = jest.fn(); +const mockGetBaseUrlForRole = jest.fn(); jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({ getMainProvider: mockGetMainProvider, @@ -16,7 +17,8 @@ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({ getResearchModelId: mockGetResearchModelId, getFallbackProvider: mockGetFallbackProvider, getFallbackModelId: mockGetFallbackModelId, - getParametersForRole: mockGetParametersForRole + getParametersForRole: mockGetParametersForRole, + getBaseUrlForRole: mockGetBaseUrlForRole })); // Mock AI Provider Modules From da317f2607ca34db1be78c19954996f634c40923 Mon Sep 17 00:00:00 2001 From: Ralph Khreish <35776126+Crunchyman-ralph@users.noreply.github.com> Date: Fri, 16 May 2025 15:47:01 +0200 Subject: [PATCH 12/12] fix: error handling of task status settings (#523) * fix: error handling of task status settings * fix: update import path --------- Co-authored-by: shenysun --- .changeset/sharp-dingos-melt.md | 5 +++ mcp-server/src/tools/set-task-status.js | 3 +- scripts/modules/commands.js | 16 +++++++++- .../modules/task-manager/set-task-status.js | 9 ++++++ .../task-manager/update-single-task-status.js | 7 ++++ scripts/modules/ui.js | 3 +- src/constants/task-status.js | 32 +++++++++++++++++++ tests/unit/task-manager.test.js | 20 ++++++++++++ 8 files changed, 92 insertions(+), 3 deletions(-) create mode 100644 .changeset/sharp-dingos-melt.md create mode 100644 src/constants/task-status.js diff --git a/.changeset/sharp-dingos-melt.md b/.changeset/sharp-dingos-melt.md new file mode 100644 index 00000000..4c2d9fd9 --- /dev/null +++ b/.changeset/sharp-dingos-melt.md @@ -0,0 +1,5 @@ +--- +'task-master-ai': patch +--- + +Fix the error handling of task status settings diff --git a/mcp-server/src/tools/set-task-status.js b/mcp-server/src/tools/set-task-status.js index d92b1b1c..04ae9052 100644 --- a/mcp-server/src/tools/set-task-status.js +++ b/mcp-server/src/tools/set-task-status.js @@ -11,6 +11,7 @@ import { } from './utils.js'; import { setTaskStatusDirect } from '../core/task-master-core.js'; import { findTasksJsonPath } from '../core/utils/path-utils.js'; +import { TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js'; /** * Register the setTaskStatus tool with the MCP server @@ -27,7 +28,7 @@ export function registerSetTaskStatusTool(server) { "Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated to update multiple tasks/subtasks at once." ), status: z - .string() + .enum(TASK_STATUS_OPTIONS) .describe( "New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled')." 
), diff --git a/scripts/modules/commands.js b/scripts/modules/commands.js index 78c9c9de..2ccc2412 100644 --- a/scripts/modules/commands.js +++ b/scripts/modules/commands.js @@ -73,6 +73,10 @@ import { getApiKeyStatusReport } from './task-manager/models.js'; import { findProjectRoot } from './utils.js'; +import { + isValidTaskStatus, + TASK_STATUS_OPTIONS +} from '../../src/constants/task-status.js'; import { getTaskMasterVersion } from '../../src/utils/getVersion.js'; /** * Runs the interactive setup process for model configuration. @@ -1033,7 +1037,7 @@ function registerCommands(programInstance) { ) .option( '-s, --status <status>', - 'New status (todo, in-progress, review, done)' + `New status (one of: ${TASK_STATUS_OPTIONS.join(', ')})` ) .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json') .action(async (options) => { @@ -1046,6 +1050,16 @@ function registerCommands(programInstance) { process.exit(1); } + if (!isValidTaskStatus(status)) { + console.error( + chalk.red( + `Error: Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}` + ) + ); + + process.exit(1); + } + console.log( chalk.blue(`Setting status of task(s) ${taskId} to: ${status}`) ); diff --git a/scripts/modules/task-manager/set-task-status.js b/scripts/modules/task-manager/set-task-status.js index f8b5fc3e..9278fdff 100644 --- a/scripts/modules/task-manager/set-task-status.js +++ b/scripts/modules/task-manager/set-task-status.js @@ -8,6 +8,10 @@ import { validateTaskDependencies } from '../dependency-manager.js'; import { getDebugFlag } from '../config-manager.js'; import updateSingleTaskStatus from './update-single-task-status.js'; import generateTaskFiles from './generate-task-files.js'; +import { + isValidTaskStatus, + TASK_STATUS_OPTIONS +} from '../../../src/constants/task-status.js'; /** * Set the status of a task @@ -19,6 +23,11 @@ import generateTaskFiles from './generate-task-files.js'; */ async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) { try { + if (!isValidTaskStatus(newStatus)) { + throw new Error( + `Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}` + ); + } // Determine if we're in MCP mode by checking for mcpLog const isMcpMode = !!options?.mcpLog; diff --git a/scripts/modules/task-manager/update-single-task-status.js b/scripts/modules/task-manager/update-single-task-status.js index e9839e3a..b8b5d3a2 100644 --- a/scripts/modules/task-manager/update-single-task-status.js +++ b/scripts/modules/task-manager/update-single-task-status.js @@ -1,6 +1,7 @@ import chalk from 'chalk'; import { log } from '../utils.js'; +import { isValidTaskStatus, TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js'; /** * Update the status of a single task @@ -17,6 +18,12 @@ async function updateSingleTaskStatus( data, showUi = true ) { + if (!isValidTaskStatus(newStatus)) { + throw new Error( + `Error: Invalid status value: ${newStatus}. 
Use one of: ${TASK_STATUS_OPTIONS.join(', ')}` ); + } + // Check if it's a subtask (e.g., "1.2") if (taskIdInput.includes('.')) { const [parentId, subtaskId] = taskIdInput diff --git a/scripts/modules/ui.js b/scripts/modules/ui.js index e6ea4c54..a88edc98 100644 --- a/scripts/modules/ui.js +++ b/scripts/modules/ui.js @@ -19,6 +19,7 @@ import { import fs from 'fs'; import { findNextTask, analyzeTaskComplexity } from './task-manager.js'; import { getProjectName, getDefaultSubtasks } from './config-manager.js'; +import { TASK_STATUS_OPTIONS } from '../../src/constants/task-status.js'; import { getTaskMasterVersion } from '../../src/utils/getVersion.js'; // Create a color gradient for the banner @@ -448,7 +449,7 @@ function displayHelp() { { name: 'set-status', args: '--id=<id> --status=<status>', - desc: 'Update task status (done, pending, etc.)' + desc: `Update task status (${TASK_STATUS_OPTIONS.join(', ')})` }, { name: 'update', diff --git a/src/constants/task-status.js b/src/constants/task-status.js new file mode 100644 index 00000000..ebad5a16 --- /dev/null +++ b/src/constants/task-status.js @@ -0,0 +1,32 @@ +/** + * @typedef {'pending' | 'done' | 'in-progress' | 'review' | 'deferred' | 'cancelled'} TaskStatus + */ + +/** + * Task status options list + * @type {TaskStatus[]} + * @description Defines possible task statuses: + * - pending: Task waiting to start + * - done: Task completed + * - in-progress: Task in progress + * - review: Task completed and waiting for review + * - deferred: Task postponed or paused + * - cancelled: Task cancelled and will not be completed + */ +export const TASK_STATUS_OPTIONS = [ + 'pending', + 'done', + 'in-progress', + 'review', + 'deferred', + 'cancelled' +]; + +/** + * Check if a given status is a valid task status + * @param {string} status - The status to check + * @returns {boolean} True if the status is valid, false otherwise + */ +export function isValidTaskStatus(status) { + return TASK_STATUS_OPTIONS.includes(status); +} diff --git a/tests/unit/task-manager.test.js b/tests/unit/task-manager.test.js index fcba1be3..ec4725e5 100644 --- a/tests/unit/task-manager.test.js +++ b/tests/unit/task-manager.test.js @@ -199,6 +199,12 @@ const testSetTaskStatus = (tasksData, taskIdInput, newStatus) => { // Simplified version of updateSingleTaskStatus for testing const testUpdateSingleTaskStatus = (tasksData, taskIdInput, newStatus) => { + if (!isValidTaskStatus(newStatus)) { + throw new Error( + `Error: Invalid status value: ${newStatus}. 
Use one of: ${TASK_STATUS_OPTIONS.join(', ')}` + ); + } + // Check if it's a subtask (e.g., "1.2") if (taskIdInput.includes('.')) { const [parentId, subtaskId] = taskIdInput @@ -329,6 +335,10 @@ const testAddTask = ( import * as taskManager from '../../scripts/modules/task-manager.js'; import { sampleClaudeResponse } from '../fixtures/sample-claude-response.js'; import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js'; +import { + isValidTaskStatus, + TASK_STATUS_OPTIONS +} from '../../src/constants/task-status.js'; // Destructure the required functions for convenience const { findNextTask, generateTaskFiles, clearSubtasks, updateTaskById } = @@ -1165,6 +1175,16 @@ describe('Task Manager Module', () => { expect(testTasksData.tasks[1].status).toBe('done'); }); + test('should throw error for invalid status', async () => { + // Arrange + const testTasksData = JSON.parse(JSON.stringify(sampleTasks)); + + // Assert + expect(() => + testUpdateSingleTaskStatus(testTasksData, '2', 'Done') + ).toThrow(/Error: Invalid status value: Done./); + }); + test('should update subtask status', async () => { // Arrange const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
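			// A small illustrative addition (not from the original patch): the
			// constants imported above can also be exercised table-driven here.
			// Validation is case-sensitive, so 'Done' is rejected while every
			// canonical status in TASK_STATUS_OPTIONS passes.
			TASK_STATUS_OPTIONS.forEach((status) =>
				expect(isValidTaskStatus(status)).toBe(true)
			);
			expect(isValidTaskStatus('Done')).toBe(false);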