diff --git a/.changeset/floppy-plants-marry.md b/.changeset/floppy-plants-marry.md new file mode 100644 index 00000000..401fed18 --- /dev/null +++ b/.changeset/floppy-plants-marry.md @@ -0,0 +1,9 @@ +--- +'task-master-ai': patch +--- + +Fix CLI --force flag for parse-prd command + +Previously, the --force flag was not respected when running `parse-prd`, causing the command to prompt for confirmation or fail even when --force was provided. This patch ensures that the flag is correctly passed and handled, allowing users to overwrite existing tasks.json files as intended. + +- Fixes #477 \ No newline at end of file diff --git a/.changeset/forty-plums-stay.md b/.changeset/forty-plums-stay.md new file mode 100644 index 00000000..d49e0653 --- /dev/null +++ b/.changeset/forty-plums-stay.md @@ -0,0 +1,5 @@ +--- +'task-master-ai': minor +--- + +.taskmasterconfig now supports a baseUrl field per model role (main, research, fallback), allowing endpoint overrides for any provider. diff --git a/.changeset/many-wasps-sell.md b/.changeset/many-wasps-sell.md new file mode 100644 index 00000000..75631311 --- /dev/null +++ b/.changeset/many-wasps-sell.md @@ -0,0 +1,5 @@ +--- +'task-master-ai': patch +--- + +Task Master no longer tells you to update when you're already up to date diff --git a/.changeset/pre.json b/.changeset/pre.json index 72461b35..e8be1d58 100644 --- a/.changeset/pre.json +++ b/.changeset/pre.json @@ -1,30 +1,12 @@ { - "mode": "pre", + "mode": "exit", "tag": "rc", "initialVersions": { - "task-master-ai": "0.13.1" + "task-master-ai": "0.13.2" }, "changesets": [ "beige-doodles-type", - "beige-rats-accept", - "blue-spies-kick", - "cuddly-zebras-matter", - "curvy-candies-eat", - "easy-toys-wash", - "every-stars-sell", - "fine-monkeys-eat", - "fine-signs-add", - "gentle-views-jump", - "mighty-mirrors-watch", - "neat-donkeys-shave", - "nine-rocks-sink", - "ninety-ghosts-relax", - "ninety-wombats-pull", - "public-cooks-fetch", "red-oranges-attend", - "red-suns-wash", - "tricky-papayas-hang", - "violet-papayas-see", - "violet-parrots-march" + "red-suns-wash" ] } diff --git a/.changeset/sharp-dingos-melt.md b/.changeset/sharp-dingos-melt.md new file mode 100644 index 00000000..4c2d9fd9 --- /dev/null +++ b/.changeset/sharp-dingos-melt.md @@ -0,0 +1,5 @@ +--- +'task-master-ai': patch +--- + +Fix the error handling of task status settings diff --git a/.changeset/slow-singers-swim.md b/.changeset/slow-singers-swim.md new file mode 100644 index 00000000..dadfda63 --- /dev/null +++ b/.changeset/slow-singers-swim.md @@ -0,0 +1,5 @@ +--- +'task-master-ai': patch +--- + +Fix for issue #409 LOG_LEVEL Pydantic validation error diff --git a/.changeset/soft-zoos-flow.md b/.changeset/soft-zoos-flow.md new file mode 100644 index 00000000..d908fa64 --- /dev/null +++ b/.changeset/soft-zoos-flow.md @@ -0,0 +1,7 @@ +--- +'task-master-ai': patch +--- + +Fix initial .env.example to work out of the box + +- Closes #419 diff --git a/.changeset/ten-ways-mate.md b/.changeset/ten-ways-mate.md new file mode 100644 index 00000000..993d6e2d --- /dev/null +++ b/.changeset/ten-ways-mate.md @@ -0,0 +1,5 @@ +--- +'task-master-ai': patch +--- + +Fix default fallback model and maxTokens in Taskmaster initialization diff --git a/.changeset/tricky-wombats-spend.md b/.changeset/tricky-wombats-spend.md new file mode 100644 index 00000000..2b961c9c --- /dev/null +++ b/.changeset/tricky-wombats-spend.md @@ -0,0 +1,5 @@ +--- +'task-master-ai': patch +--- + +Fix bug when updating tasks on the MCP server (#412) diff --git 
a/.changeset/wide-eyes-relax.md b/.changeset/wide-eyes-relax.md new file mode 100644 index 00000000..1684d1a5 --- /dev/null +++ b/.changeset/wide-eyes-relax.md @@ -0,0 +1,11 @@ +--- +'task-master-ai': patch +--- + +Fix duplicate output on CLI help screen + +- Prevent the Task Master CLI from printing the help screen more than once when using `-h` or `--help`. +- Removed redundant manual event handlers and guards for help output; now only the Commander `.helpInformation` override is used for custom help. +- Simplified logic so that help is only shown once for both "no arguments" and help flag flows. +- Ensures a clean, branded help experience with no repeated content. +- Fixes #339 diff --git a/.cursor/rules/dev_workflow.mdc b/.cursor/rules/dev_workflow.mdc index 4d430323..003251d8 100644 --- a/.cursor/rules/dev_workflow.mdc +++ b/.cursor/rules/dev_workflow.mdc @@ -116,7 +116,7 @@ Taskmaster configuration is managed through two main mechanisms: * For MCP/Cursor integration, configure these keys in the `env` section of `.cursor/mcp.json`. * Available keys/variables: See `assets/env.example` or the Configuration section in the command reference (previously linked to `taskmaster.mdc`). -**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool. +**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `TASKMASTER_LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool. **If AI commands FAIL in MCP** verify that the API key for the selected provider is present in the `env` section of `.cursor/mcp.json`. **If AI commands FAIL in CLI** verify that the API key for the selected provider is present in the `.env` file in the root of the project. diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 0bab3820..430c1aab 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -2,9 +2,6 @@ name: Pre-Release (RC) on: workflow_dispatch: # Allows manual triggering from GitHub UI/API - push: - branches: - - 'next' concurrency: pre-release-${{ github.ref }} @@ -41,12 +38,10 @@ jobs: npx changeset pre enter rc - name: Version RC packages - run: | - git config user.name "GitHub Actions" - git config user.email "github-actions@example.com" - npx changeset version - git add . 
- git commit -m "chore: rc version bump" || echo "No changes to commit" + run: npx changeset version + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + NPM_TOKEN: ${{ secrets.NPM_TOKEN }} - name: Create Release Candidate Pull Request or Publish Release Candidate to npm uses: changesets/action@v1 @@ -55,3 +50,13 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} NPM_TOKEN: ${{ secrets.NPM_TOKEN }} + + - name: Exit RC mode + run: npx changeset pre exit + + - name: Commit & Push changes + uses: actions-js/push@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + branch: ${{ github.ref }} + message: 'chore: rc version bump' diff --git a/assets/.taskmasterconfig b/assets/.taskmasterconfig index 0b874da5..2e015bf6 100644 --- a/assets/.taskmasterconfig +++ b/assets/.taskmasterconfig @@ -14,8 +14,8 @@ }, "fallback": { "provider": "anthropic", - "modelId": "claude-3.5-sonnet-20240620", - "maxTokens": 120000, + "modelId": "claude-3-5-sonnet-20240620", + "maxTokens": 8192, "temperature": 0.1 } }, diff --git a/assets/.windsurfrules b/assets/.windsurfrules index c253460c..a5cf07aa 100644 --- a/assets/.windsurfrules +++ b/assets/.windsurfrules @@ -198,7 +198,7 @@ alwaysApply: true - **MAX_TOKENS** (Default: `"4000"`): Maximum tokens for responses (Example: `MAX_TOKENS=8000`) - **TEMPERATURE** (Default: `"0.7"`): Temperature for model responses (Example: `TEMPERATURE=0.5`) - **DEBUG** (Default: `"false"`): Enable debug logging (Example: `DEBUG=true`) - - **LOG_LEVEL** (Default: `"info"`): Console output level (Example: `LOG_LEVEL=debug`) + - **TASKMASTER_LOG_LEVEL** (Default: `"info"`): Console output level (Example: `TASKMASTER_LOG_LEVEL=debug`) - **DEFAULT_SUBTASKS** (Default: `"3"`): Default subtask count (Example: `DEFAULT_SUBTASKS=5`) - **DEFAULT_PRIORITY** (Default: `"medium"`): Default priority (Example: `DEFAULT_PRIORITY=high`) - **PROJECT_NAME** (Default: `"MCP SaaS MVP"`): Project name in metadata (Example: `PROJECT_NAME=My Awesome Project`) diff --git a/assets/env.example b/assets/env.example index d44c6b09..1c9b41e4 100644 --- a/assets/env.example +++ b/assets/env.example @@ -1,8 +1,8 @@ # API Keys (Required to enable respective provider) -ANTHROPIC_API_KEY=your_anthropic_api_key_here # Required: Format: sk-ant-api03-... -PERPLEXITY_API_KEY=your_perplexity_api_key_here # Optional: Format: pplx-... -OPENAI_API_KEY=your_openai_api_key_here # Optional, for OpenAI/OpenRouter models. Format: sk-proj-... -GOOGLE_API_KEY=your_google_api_key_here # Optional, for Google Gemini models. -MISTRAL_API_KEY=your_mistral_key_here # Optional, for Mistral AI models. -XAI_API_KEY=YOUR_XAI_KEY_HERE # Optional, for xAI AI models. -AZURE_OPENAI_API_KEY=your_azure_key_here # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig). \ No newline at end of file +ANTHROPIC_API_KEY="your_anthropic_api_key_here" # Required: Format: sk-ant-api03-... +PERPLEXITY_API_KEY="your_perplexity_api_key_here" # Optional: Format: pplx-... +OPENAI_API_KEY="your_openai_api_key_here" # Optional, for OpenAI/OpenRouter models. Format: sk-proj-... +GOOGLE_API_KEY="your_google_api_key_here" # Optional, for Google Gemini models. +MISTRAL_API_KEY="your_mistral_key_here" # Optional, for Mistral AI models. +XAI_API_KEY="YOUR_XAI_KEY_HERE" # Optional, for xAI AI models. +AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig). 
\ No newline at end of file diff --git a/assets/scripts_README.md b/assets/scripts_README.md index 0d615389..1e76856f 100644 --- a/assets/scripts_README.md +++ b/assets/scripts_README.md @@ -31,7 +31,7 @@ Task Master configuration is now managed through two primary methods: - Create a `.env` file in your project root for CLI usage. - See `assets/env.example` for required key names. -**Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead. +**Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `TASKMASTER_LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead. ## How It Works @@ -42,7 +42,7 @@ Task Master configuration is now managed through two primary methods: - Tasks can have `subtasks` for more detailed implementation steps. - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress. -2. **CLI Commands** +2. **CLI Commands** You can run the commands via: ```bash @@ -200,7 +200,7 @@ Notes: ## Logging -The script supports different logging levels controlled by the `LOG_LEVEL` environment variable: +The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable: - `debug`: Detailed information, typically useful for troubleshooting - `info`: Confirmation that things are working as expected (default) diff --git a/docs/configuration.md b/docs/configuration.md index f1e57560..615e184f 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -15,13 +15,15 @@ Taskmaster uses two primary methods for configuration: "provider": "anthropic", "modelId": "claude-3-7-sonnet-20250219", "maxTokens": 64000, - "temperature": 0.2 + "temperature": 0.2, + "baseUrl": "https://api.anthropic.com/v1" }, "research": { "provider": "perplexity", "modelId": "sonar-pro", "maxTokens": 8700, - "temperature": 0.1 + "temperature": 0.1, + "baseUrl": "https://api.perplexity.ai/v1" }, "fallback": { "provider": "anthropic", @@ -56,8 +58,9 @@ Taskmaster uses two primary methods for configuration: - `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`). - `OPENROUTER_API_KEY`: Your OpenRouter API key. - `XAI_API_KEY`: Your X-AI API key. - - **Optional Endpoint Overrides (in .taskmasterconfig):** - - `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key. + - **Optional Endpoint Overrides:** + - **Per-role `baseUrl` in `.taskmasterconfig`:** You can add a `baseUrl` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used. + - `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseUrl` for the Azure model role). - `OLLAMA_BASE_URL`: Override the default Ollama API URL (Default: `http://localhost:11434/api`). **Important:** Settings like model ID selections (`main`, `research`, `fallback`), `maxTokens`, `temperature`, `logLevel`, `defaultSubtasks`, `defaultPriority`, and `projectName` are **managed in `.taskmasterconfig`**, not environment variables. 
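As a quick illustration of the per-role `baseUrl` override described above, a `.taskmasterconfig` might point the `main` role at a self-hosted gateway while the other roles keep their providers' default endpoints. This is a minimal, hypothetical sketch (the gateway URL is a placeholder, not a real endpoint):

```json
{
	"models": {
		"main": {
			"provider": "anthropic",
			"modelId": "claude-3-7-sonnet-20250219",
			"maxTokens": 64000,
			"temperature": 0.2,
			"baseUrl": "https://llm-gateway.example.com/v1"
		}
	}
}
```

Roles that omit `baseUrl` are unaffected: `getBaseUrlForRole` returns `undefined` for them, so each provider falls back to its standard endpoint.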
diff --git a/mcp-server/src/core/direct-functions/update-tasks.js b/mcp-server/src/core/direct-functions/update-tasks.js index 2884958c..f4cd6d8d 100644 --- a/mcp-server/src/core/direct-functions/update-tasks.js +++ b/mcp-server/src/core/direct-functions/update-tasks.js @@ -6,6 +6,10 @@ import path from 'path'; import { updateTasks } from '../../../../scripts/modules/task-manager.js'; import { createLogWrapper } from '../../tools/utils.js'; +import { + enableSilentMode, + disableSilentMode +} from '../../../../scripts/modules/utils.js'; /** * Direct function wrapper for updating tasks based on new context. diff --git a/mcp-server/src/tools/set-task-status.js b/mcp-server/src/tools/set-task-status.js index d92b1b1c..04ae9052 100644 --- a/mcp-server/src/tools/set-task-status.js +++ b/mcp-server/src/tools/set-task-status.js @@ -11,6 +11,7 @@ import { } from './utils.js'; import { setTaskStatusDirect } from '../core/task-master-core.js'; import { findTasksJsonPath } from '../core/utils/path-utils.js'; +import { TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js'; /** * Register the setTaskStatus tool with the MCP server @@ -27,7 +28,7 @@ export function registerSetTaskStatusTool(server) { "Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated to update multiple tasks/subtasks at once." ), status: z - .string() + .enum(TASK_STATUS_OPTIONS) .describe( "New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'." ), diff --git a/package-lock.json b/package-lock.json index 342dd287..2a437a53 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "task-master-ai", - "version": "0.13.2-rc.1", + "version": "0.13.2", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "task-master-ai", - "version": "0.13.2-rc.1", + "version": "0.13.2", "license": "MIT WITH Commons-Clause", "dependencies": { "@ai-sdk/anthropic": "^1.2.10", diff --git a/package.json b/package.json index a9ef850d..039bcf41 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "task-master-ai", - "version": "0.13.2-rc.1", + "version": "0.13.2", "description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.", "main": "index.js", "type": "module", diff --git a/scripts/README.md b/scripts/README.md index 640703e4..1b83c635 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -32,7 +32,7 @@ The script can be configured through environment variables in a `.env` file at t - `PERPLEXITY_API_KEY`: Your Perplexity API key for research-backed subtask generation - `PERPLEXITY_MODEL`: Specify which Perplexity model to use (default: "sonar-medium-online") - `DEBUG`: Enable debug logging (default: false) -- `LOG_LEVEL`: Log level - debug, info, warn, error (default: info) +- `TASKMASTER_LOG_LEVEL`: Log level - debug, info, warn, error (default: info) - `DEFAULT_SUBTASKS`: Default number of subtasks when expanding (default: 3) - `DEFAULT_PRIORITY`: Default priority for generated tasks (default: medium) - `PROJECT_NAME`: Override default project name in tasks.json @@ -47,7 +47,7 @@ The script can be configured through environment variables in a `.env` file at t - Tasks can have `subtasks` for more detailed implementation steps. - Dependencies are displayed with status indicators (✅ for completed, ⏱️ for pending) to easily track progress. -2. **Script Commands** +2. 
**Script Commands** You can run the script via: ```bash @@ -225,7 +225,7 @@ To use the Perplexity integration: ## Logging -The script supports different logging levels controlled by the `LOG_LEVEL` environment variable: +The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable: - `debug`: Detailed information, typically useful for troubleshooting - `info`: Confirmation that things are working as expected (default) diff --git a/scripts/init.js b/scripts/init.js index efe776d7..9f636314 100755 --- a/scripts/init.js +++ b/scripts/init.js @@ -38,10 +38,10 @@ const LOG_LEVELS = { success: 4 }; -// Get log level from environment or default to info -const LOG_LEVEL = process.env.LOG_LEVEL - ? LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()] - : LOG_LEVELS.info; +// Determine log level from environment variable or default to 'info' +const LOG_LEVEL = process.env.TASKMASTER_LOG_LEVEL + ? LOG_LEVELS[process.env.TASKMASTER_LOG_LEVEL.toLowerCase()] + : LOG_LEVELS.info; // Default to info // Create a color gradient for the banner const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']); diff --git a/scripts/modules/ai-services-unified.js b/scripts/modules/ai-services-unified.js index 11a3ffc2..948c2a15 100644 --- a/scripts/modules/ai-services-unified.js +++ b/scripts/modules/ai-services-unified.js @@ -17,7 +17,8 @@ import { getParametersForRole, getUserId, MODEL_MAP, - getDebugFlag + getDebugFlag, + getBaseUrlForRole } from './config-manager.js'; import { log, resolveEnvVariable, isSilentMode } from './utils.js'; @@ -339,9 +340,15 @@ async function _unifiedServiceRunner(serviceType, params) { 'AI service call failed for all configured roles.'; for (const currentRole of sequence) { - let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn; - let providerResponse; - let telemetryData = null; + let providerName, + modelId, + apiKey, + roleParams, + providerFnSet, + providerApiFn, + baseUrl, + providerResponse, + telemetryData = null; try { log('info', `New AI service call with role: ${currentRole}`); @@ -382,6 +389,7 @@ async function _unifiedServiceRunner(serviceType, params) { // Pass effectiveProjectRoot to getParametersForRole roleParams = getParametersForRole(currentRole, effectiveProjectRoot); + baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot); // 2. Get Provider Function Set providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()]; @@ -458,6 +466,7 @@ async function _unifiedServiceRunner(serviceType, params) { maxTokens: roleParams.maxTokens, temperature: roleParams.temperature, messages, + baseUrl, ...(serviceType === 'generateObject' && { schema, objectName }), ...restApiParams }; diff --git a/scripts/modules/commands.js b/scripts/modules/commands.js index 3e480c4b..6d5542bb 100644 --- a/scripts/modules/commands.js +++ b/scripts/modules/commands.js @@ -74,7 +74,11 @@ import { getApiKeyStatusReport } from './task-manager/models.js'; import { findProjectRoot } from './utils.js'; - +import { + isValidTaskStatus, + TASK_STATUS_OPTIONS +} from '../../src/constants/task-status.js'; +import { getTaskMasterVersion } from '../../src/utils/getVersion.js'; /** * Runs the interactive setup process for model configuration. * @param {string|null} projectRoot - The resolved project root directory. 
@@ -487,11 +491,6 @@ function registerCommands(programInstance) {
 			process.exit(1);
 		});
 
-	// Default help
-	programInstance.on('--help', function () {
-		displayHelp();
-	});
-
 	// parse-prd command
 	programInstance
 		.command('parse-prd')
@@ -1039,7 +1038,7 @@ function registerCommands(programInstance) {
 		)
 		.option(
 			'-s, --status <status>',
-			'New status (todo, in-progress, review, done)'
+			`New status (one of: ${TASK_STATUS_OPTIONS.join(', ')})`
 		)
 		.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
 		.action(async (options) => {
@@ -1052,6 +1051,16 @@ function registerCommands(programInstance) {
 				process.exit(1);
 			}
 
+			if (!isValidTaskStatus(status)) {
+				console.error(
+					chalk.red(
+						`Error: Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
+					)
+				);
+
+				process.exit(1);
+			}
+
 			console.log(
 				chalk.blue(`Setting status of task(s) ${taskId} to: ${status}`)
 			);
@@ -1273,10 +1282,6 @@ function registerCommands(programInstance) {
 			'--details <details>
', 'Implementation details (for manual task creation)' ) - .option( - '--test-strategy ', - 'Test strategy (for manual task creation)' - ) .option( '--dependencies ', 'Comma-separated list of task IDs this task depends on' @@ -1654,6 +1659,7 @@ function registerCommands(programInstance) { } } catch (error) { console.error(chalk.red(`Error: ${error.message}`)); + showAddSubtaskHelp(); process.exit(1); } }) @@ -2357,14 +2363,7 @@ function setupCLI() { return 'unknown'; // Default fallback if package.json fails }) .helpOption('-h, --help', 'Display help') - .addHelpCommand(false) // Disable default help command - .on('--help', () => { - displayHelp(); // Use your custom help display instead - }) - .on('-h', () => { - displayHelp(); - process.exit(0); - }); + .addHelpCommand(false); // Disable default help command // Modify the help option to use your custom display programInstance.helpInformation = () => { @@ -2384,28 +2383,7 @@ function setupCLI() { */ async function checkForUpdate() { // Get current version from package.json ONLY - let currentVersion = 'unknown'; // Initialize with a default - try { - // Try to get the version from the installed package (if applicable) or current dir - let packageJsonPath = path.join( - process.cwd(), - 'node_modules', - 'task-master-ai', - 'package.json' - ); - // Fallback to current directory package.json if not found in node_modules - if (!fs.existsSync(packageJsonPath)) { - packageJsonPath = path.join(process.cwd(), 'package.json'); - } - - if (fs.existsSync(packageJsonPath)) { - const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8')); - currentVersion = packageJson.version; - } - } catch (error) { - // Silently fail and use default - log('debug', `Error reading current package version: ${error.message}`); - } + const currentVersion = getTaskMasterVersion(); return new Promise((resolve) => { // Get the latest version from npm registry diff --git a/scripts/modules/config-manager.js b/scripts/modules/config-manager.js index 16f06767..217b2faf 100644 --- a/scripts/modules/config-manager.js +++ b/scripts/modules/config-manager.js @@ -687,6 +687,13 @@ function getAllProviders() { return Object.keys(MODEL_MAP || {}); } +function getBaseUrlForRole(role, explicitRoot = null) { + const roleConfig = getModelConfigForRole(role, explicitRoot); + return roleConfig && typeof roleConfig.baseUrl === 'string' + ? 
roleConfig.baseUrl
+		: undefined;
+}
+
 export {
 	// Core config access
 	getConfig,
@@ -714,6 +721,7 @@ export {
 	getFallbackModelId,
 	getFallbackMaxTokens,
 	getFallbackTemperature,
+	getBaseUrlForRole,
 
 	// Global setting getters (No env var overrides)
 	getLogLevel,
diff --git a/scripts/modules/task-manager/set-task-status.js b/scripts/modules/task-manager/set-task-status.js
index f8b5fc3e..9278fdff 100644
--- a/scripts/modules/task-manager/set-task-status.js
+++ b/scripts/modules/task-manager/set-task-status.js
@@ -8,6 +8,10 @@ import { validateTaskDependencies } from '../dependency-manager.js';
 import { getDebugFlag } from '../config-manager.js';
 import updateSingleTaskStatus from './update-single-task-status.js';
 import generateTaskFiles from './generate-task-files.js';
+import {
+	isValidTaskStatus,
+	TASK_STATUS_OPTIONS
+} from '../../../src/constants/task-status.js';
 
 /**
  * Set the status of a task
@@ -19,6 +23,11 @@ import generateTaskFiles from './generate-task-files.js';
  */
 async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {
 	try {
+		if (!isValidTaskStatus(newStatus)) {
+			throw new Error(
+				`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
+			);
+		}
 		// Determine if we're in MCP mode by checking for mcpLog
 		const isMcpMode = !!options?.mcpLog;
 
diff --git a/scripts/modules/task-manager/update-single-task-status.js b/scripts/modules/task-manager/update-single-task-status.js
index e9839e3a..b8b5d3a2 100644
--- a/scripts/modules/task-manager/update-single-task-status.js
+++ b/scripts/modules/task-manager/update-single-task-status.js
@@ -1,6 +1,7 @@
 import chalk from 'chalk';
 import { log } from '../utils.js';
+import { isValidTaskStatus, TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js';
 
 /**
  * Update the status of a single task
@@ -17,6 +18,12 @@ async function updateSingleTaskStatus(
 	data,
 	showUi = true
 ) {
+	if (!isValidTaskStatus(newStatus)) {
+		throw new Error(
+			`Error: Invalid status value: ${newStatus}. 
Use one of: ${TASK_STATUS_OPTIONS.join(', ')}` + ); + } + // Check if it's a subtask (e.g., "1.2") if (taskIdInput.includes('.')) { const [parentId, subtaskId] = taskIdInput diff --git a/scripts/modules/ui.js b/scripts/modules/ui.js index b9937bd5..60af7f57 100644 --- a/scripts/modules/ui.js +++ b/scripts/modules/ui.js @@ -16,10 +16,11 @@ import { truncate, isSilentMode } from './utils.js'; -import path from 'path'; import fs from 'fs'; import { findNextTask, analyzeTaskComplexity } from './task-manager.js'; import { getProjectName, getDefaultSubtasks } from './config-manager.js'; +import { TASK_STATUS_OPTIONS } from '../../src/constants/task-status.js'; +import { getTaskMasterVersion } from '../../src/utils/getVersion.js'; // Create a color gradient for the banner const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']); @@ -46,17 +47,7 @@ function displayBanner() { ); // Read version directly from package.json - let version = 'unknown'; // Initialize with a default - try { - const packageJsonPath = path.join(process.cwd(), 'package.json'); - if (fs.existsSync(packageJsonPath)) { - const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8')); - version = packageJson.version; - } - } catch (error) { - // Silently fall back to default version - log('warn', 'Could not read package.json for version info.'); - } + const version = getTaskMasterVersion(); console.log( boxen( @@ -458,7 +449,7 @@ function displayHelp() { { name: 'set-status', args: '--id= --status=', - desc: 'Update task status (done, pending, etc.)' + desc: `Update task status (${TASK_STATUS_OPTIONS.join(', ')})` }, { name: 'update', @@ -809,12 +800,7 @@ async function displayNextTask(tasksPath) { 'padding-bottom': 0, compact: true }, - chars: { - mid: '', - 'left-mid': '', - 'mid-mid': '', - 'right-mid': '' - }, + chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' }, colWidths: [15, Math.min(75, process.stdout.columns - 20 || 60)], wordWrap: true }); @@ -905,12 +891,7 @@ async function displayNextTask(tasksPath) { 'padding-bottom': 0, compact: true }, - chars: { - mid: '', - 'left-mid': '', - 'mid-mid': '', - 'right-mid': '' - }, + chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' }, wordWrap: true }); diff --git a/src/ai-providers/anthropic.js b/src/ai-providers/anthropic.js index bad8105d..080cc43b 100644 --- a/src/ai-providers/anthropic.js +++ b/src/ai-providers/anthropic.js @@ -5,7 +5,7 @@ * using the Vercel AI SDK. */ import { createAnthropic } from '@ai-sdk/anthropic'; -import { generateText, streamText, generateObject, streamObject } from 'ai'; +import { generateText, streamText, generateObject } from 'ai'; import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible // TODO: Implement standardized functions for generateText, streamText, generateObject @@ -17,7 +17,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils is acces // Remove the global variable and caching logic // let anthropicClient; -function getClient(apiKey) { +function getClient(apiKey, baseUrl) { if (!apiKey) { // In a real scenario, this would use the config resolver. // Throwing error here if key isn't passed for simplicity. 
@@ -30,14 +30,12 @@ function getClient(apiKey) { // Create and return a new instance directly with standard version header return createAnthropic({ apiKey: apiKey, - baseURL: 'https://api.anthropic.com/v1', + ...(baseUrl && { baseURL: baseUrl }), // Use standard version header instead of beta headers: { 'anthropic-beta': 'output-128k-2025-02-19' } }); - // } - // return anthropicClient; } // --- Standardized Service Function Implementations --- @@ -51,6 +49,7 @@ function getClient(apiKey) { * @param {Array} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]). * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. + * @param {string} [params.baseUrl] - The base URL for the Anthropic API. * @returns {Promise} The generated text content and usage. * @throws {Error} If the API call fails. */ @@ -59,11 +58,12 @@ export async function generateAnthropicText({ modelId, messages, maxTokens, - temperature + temperature, + baseUrl }) { log('debug', `Generating Anthropic text with model: ${modelId}`); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); const result = await generateText({ model: client(modelId), messages: messages, @@ -100,6 +100,7 @@ export async function generateAnthropicText({ * @param {Array} params.messages - The messages array. * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. + * @param {string} [params.baseUrl] - The base URL for the Anthropic API. * @returns {Promise} The full stream result object from the Vercel AI SDK. * @throws {Error} If the API call fails to initiate the stream. */ @@ -108,20 +109,20 @@ export async function streamAnthropicText({ modelId, messages, maxTokens, - temperature + temperature, + baseUrl }) { log('debug', `Streaming Anthropic text with model: ${modelId}`); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); - // --- DEBUG LOGGING --- >> log( 'debug', '[streamAnthropicText] Parameters received by streamText:', JSON.stringify( { - modelId: modelId, // Log modelId being used - messages: messages, // Log the messages array + modelId: modelId, + messages: messages, maxTokens: maxTokens, temperature: temperature }, @@ -129,25 +130,19 @@ export async function streamAnthropicText({ 2 ) ); - // --- << DEBUG LOGGING --- const stream = await streamText({ model: client(modelId), messages: messages, maxTokens: maxTokens, temperature: temperature - // Beta header moved to client initialization // TODO: Add other relevant parameters }); // *** RETURN THE FULL STREAM OBJECT, NOT JUST stream.textStream *** return stream; } catch (error) { - log( - 'error', - `Anthropic streamText failed: ${error.message}`, - error.stack // Log stack trace for more details - ); + log('error', `Anthropic streamText failed: ${error.message}`, error.stack); throw error; } } @@ -167,6 +162,7 @@ export async function streamAnthropicText({ * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.maxRetries] - Max retries for validation/generation. + * @param {string} [params.baseUrl] - The base URL for the Anthropic API. * @returns {Promise} The generated object matching the schema and usage. * @throws {Error} If generation or validation fails. 
*/ @@ -178,24 +174,22 @@ export async function generateAnthropicObject({ objectName = 'generated_object', maxTokens, temperature, - maxRetries = 3 + maxRetries = 3, + baseUrl }) { log( 'debug', `Generating Anthropic object ('${objectName}') with model: ${modelId}` ); try { - const client = getClient(apiKey); - - // Log basic debug info + const client = getClient(apiKey, baseUrl); log( 'debug', `Using maxTokens: ${maxTokens}, temperature: ${temperature}, model: ${modelId}` ); - const result = await generateObject({ model: client(modelId), - mode: 'tool', // Anthropic generally uses 'tool' mode for structured output + mode: 'tool', schema: schema, messages: messages, tool: { @@ -206,7 +200,6 @@ export async function generateAnthropicObject({ temperature: temperature, maxRetries: maxRetries }); - log( 'debug', `Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}` @@ -220,7 +213,6 @@ export async function generateAnthropicObject({ } }; } catch (error) { - // Simple error logging log( 'error', `Anthropic generateObject ('${objectName}') failed: ${error.message}` diff --git a/src/ai-providers/google.js b/src/ai-providers/google.js index 676dc4ec..108cd8f4 100644 --- a/src/ai-providers/google.js +++ b/src/ai-providers/google.js @@ -12,6 +12,16 @@ import { log } from '../../scripts/modules/utils.js'; // Import logging utility const DEFAULT_MODEL = 'gemini-2.5-pro-exp-03-25'; // Or a suitable default const DEFAULT_TEMPERATURE = 0.2; // Or a suitable default +function getClient(apiKey, baseUrl) { + if (!apiKey) { + throw new Error('Google API key is required.'); + } + return createGoogleGenerativeAI({ + apiKey: apiKey, + ...(baseUrl && { baseURL: baseUrl }) + }); +} + /** * Generates text using a Google AI model. 
* @@ -29,7 +39,8 @@ async function generateGoogleText({ modelId = DEFAULT_MODEL, temperature = DEFAULT_TEMPERATURE, messages, - maxTokens // Note: Vercel SDK might handle this differently, needs verification + maxTokens, + baseUrl }) { if (!apiKey) { throw new Error('Google API key is required.'); @@ -37,18 +48,13 @@ async function generateGoogleText({ log('info', `Generating text with Google model: ${modelId}`); try { - // const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation - const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation - // const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval - const model = googleProvider(modelId); // Correct model retrieval - - // Construct payload suitable for Vercel SDK's generateText - // Note: The exact structure might depend on how messages are passed + const googleProvider = getClient(apiKey, baseUrl); + const model = googleProvider(modelId); const result = await generateText({ - model, // Pass the model instance - messages, // Pass the messages array directly + model, + messages, temperature, - maxOutputTokens: maxTokens // Map to correct Vercel SDK param if available + maxOutputTokens: maxTokens }); // Assuming result structure provides text directly or within a property @@ -66,7 +72,7 @@ async function generateGoogleText({ 'error', `Error generating text with Google (${modelId}): ${error.message}` ); - throw error; // Re-throw for unified service handler + throw error; } } @@ -87,7 +93,8 @@ async function streamGoogleText({ modelId = DEFAULT_MODEL, temperature = DEFAULT_TEMPERATURE, messages, - maxTokens + maxTokens, + baseUrl }) { if (!apiKey) { throw new Error('Google API key is required.'); @@ -95,19 +102,15 @@ async function streamGoogleText({ log('info', `Streaming text with Google model: ${modelId}`); try { - // const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation - const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation - // const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval - const model = googleProvider(modelId); // Correct model retrieval - + const googleProvider = getClient(apiKey, baseUrl); + const model = googleProvider(modelId); const stream = await streamText({ - model, // Pass the model instance + model, messages, temperature, maxOutputTokens: maxTokens }); - - return stream; // Return the stream directly + return stream; } catch (error) { log( 'error', @@ -138,7 +141,8 @@ async function generateGoogleObject({ messages, schema, objectName, // Note: Vercel SDK might use this differently or not at all - maxTokens + maxTokens, + baseUrl }) { if (!apiKey) { throw new Error('Google API key is required.'); @@ -146,20 +150,14 @@ async function generateGoogleObject({ log('info', `Generating object with Google model: ${modelId}`); try { - // const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation - const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation - // const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval - const model = googleProvider(modelId); // Correct model retrieval - + const googleProvider = getClient(apiKey, baseUrl); + const model = googleProvider(modelId); const result = await generateObject({ - model, // Pass the model instance + model, schema, messages, temperature, maxOutputTokens: maxTokens - // Note: 'objectName' or 'mode' might not be directly 
applicable here
-			// depending on how `@ai-sdk/google` handles `generateObject`.
-			// Check SDK docs if specific tool calling/JSON mode needs explicit setup.
 		});
 
 		// return object; // Return the parsed object
diff --git a/src/ai-providers/openai.js b/src/ai-providers/openai.js
index 22cb9620..dae77229 100644
--- a/src/ai-providers/openai.js
+++ b/src/ai-providers/openai.js
@@ -1,16 +1,26 @@
-import { createOpenAI, openai } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
-import { generateText, streamText, generateObject } from 'ai'; // Import necessary functions from 'ai'
+import { createOpenAI } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
+import { generateText, generateObject } from 'ai'; // Import necessary functions from 'ai'; generateText is still called below
 import { log } from '../../scripts/modules/utils.js';
 
+function getClient(apiKey, baseUrl) {
+	if (!apiKey) {
+		throw new Error('OpenAI API key is required.');
+	}
+	return createOpenAI({
+		apiKey: apiKey,
+		...(baseUrl && { baseURL: baseUrl })
+	});
+}
+
 /**
  * Generates text using OpenAI models via Vercel AI SDK.
  *
- * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
+ * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
  * @returns {Promise} The generated text content and usage.
  * @throws {Error} If API call fails.
  */
 export async function generateOpenAIText(params) {
-	const { apiKey, modelId, messages, maxTokens, temperature } = params;
+	const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
 	log('debug', `generateOpenAIText called with model: ${modelId}`);
 
 	if (!apiKey) {
@@ -23,7 +33,7 @@
 		throw new Error('Invalid or empty messages array provided for OpenAI.');
 	}
 
-	const openaiClient = createOpenAI({ apiKey });
+	const openaiClient = getClient(apiKey, baseUrl);
 
 	try {
 		const result = await generateText({
@@ -67,12 +77,12 @@
 /**
  * Streams text using OpenAI models via Vercel AI SDK.
  *
- * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
+ * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
  * @returns {Promise} A readable stream of text deltas.
  * @throws {Error} If API call fails.
  */
 export async function streamOpenAIText(params) {
-	const { apiKey, modelId, messages, maxTokens, temperature } = params;
+	const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
 	log('debug', `streamOpenAIText called with model: ${modelId}`);
 
 	if (!apiKey) {
@@ -87,7 +97,7 @@
 		);
 	}
 
-	const openaiClient = createOpenAI({ apiKey });
+	const openaiClient = getClient(apiKey, baseUrl);
 
 	try {
 		const stream = await openaiClient.chat.stream(messages, {
@@ -116,7 +126,7 @@
 /**
  * Generates structured objects using OpenAI models via Vercel AI SDK.
  *
- * @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
+ * @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature, baseUrl.
 * @returns {Promise} The generated object matching the schema and usage.
 * @throws {Error} If API call fails or object generation fails.
*/ @@ -128,7 +138,8 @@ export async function generateOpenAIObject(params) { schema, objectName, maxTokens, - temperature + temperature, + baseUrl } = params; log( 'debug', @@ -144,7 +155,7 @@ export async function generateOpenAIObject(params) { if (!objectName) throw new Error('Object name is required for OpenAI object generation.'); - const openaiClient = createOpenAI({ apiKey }); + const openaiClient = getClient(apiKey, baseUrl); try { const result = await generateObject({ diff --git a/src/ai-providers/openrouter.js b/src/ai-providers/openrouter.js index 36b7f2fc..af9c9fef 100644 --- a/src/ai-providers/openrouter.js +++ b/src/ai-providers/openrouter.js @@ -2,6 +2,14 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider'; import { generateText, streamText, generateObject } from 'ai'; import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in scripts/modules +function getClient(apiKey, baseUrl) { + if (!apiKey) throw new Error('OpenRouter API key is required.'); + return createOpenRouter({ + apiKey, + ...(baseUrl && { baseURL: baseUrl }) + }); +} + /** * Generates text using an OpenRouter chat model. * @@ -11,6 +19,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in * @param {Array} params.messages - Array of message objects (system, user, assistant). * @param {number} [params.maxTokens] - Maximum tokens to generate. * @param {number} [params.temperature] - Sampling temperature. + * @param {string} [params.baseUrl] - Base URL for the OpenRouter API. * @returns {Promise} The generated text content. * @throws {Error} If the API call fails. */ @@ -20,6 +29,7 @@ async function generateOpenRouterText({ messages, maxTokens, temperature, + baseUrl, ...rest // Capture any other Vercel AI SDK compatible parameters }) { if (!apiKey) throw new Error('OpenRouter API key is required.'); @@ -28,7 +38,7 @@ async function generateOpenRouterText({ throw new Error('Messages array cannot be empty.'); try { - const openrouter = createOpenRouter({ apiKey }); + const openrouter = getClient(apiKey, baseUrl); const model = openrouter.chat(modelId); // Assuming chat model // Capture the full result from generateText @@ -85,6 +95,7 @@ async function generateOpenRouterText({ * @param {Array} params.messages - Array of message objects (system, user, assistant). * @param {number} [params.maxTokens] - Maximum tokens to generate. * @param {number} [params.temperature] - Sampling temperature. + * @param {string} [params.baseUrl] - Base URL for the OpenRouter API. * @returns {Promise>} A readable stream of text deltas. * @throws {Error} If the API call fails. */ @@ -94,6 +105,7 @@ async function streamOpenRouterText({ messages, maxTokens, temperature, + baseUrl, ...rest }) { if (!apiKey) throw new Error('OpenRouter API key is required.'); @@ -102,7 +114,7 @@ async function streamOpenRouterText({ throw new Error('Messages array cannot be empty.'); try { - const openrouter = createOpenRouter({ apiKey }); + const openrouter = getClient(apiKey, baseUrl); const model = openrouter.chat(modelId); // Directly return the stream from the Vercel AI SDK function @@ -135,6 +147,7 @@ async function streamOpenRouterText({ * @param {number} [params.maxRetries=3] - Max retries for object generation. * @param {number} [params.maxTokens] - Maximum tokens. * @param {number} [params.temperature] - Temperature. + * @param {string} [params.baseUrl] - Base URL for the OpenRouter API. * @returns {Promise} The generated object matching the schema. 
* @throws {Error} If the API call fails or validation fails. */ @@ -147,6 +160,7 @@ async function generateOpenRouterObject({ maxRetries = 3, maxTokens, temperature, + baseUrl, ...rest }) { if (!apiKey) throw new Error('OpenRouter API key is required.'); @@ -156,7 +170,7 @@ async function generateOpenRouterObject({ throw new Error('Messages array cannot be empty.'); try { - const openrouter = createOpenRouter({ apiKey }); + const openrouter = getClient(apiKey, baseUrl); const model = openrouter.chat(modelId); // Capture the full result from generateObject diff --git a/src/ai-providers/perplexity.js b/src/ai-providers/perplexity.js index 6e64c556..51481204 100644 --- a/src/ai-providers/perplexity.js +++ b/src/ai-providers/perplexity.js @@ -10,13 +10,13 @@ import { log } from '../../scripts/modules/utils.js'; // --- Client Instantiation --- // Similar to Anthropic, this expects the resolved API key to be passed in. -function getClient(apiKey) { +function getClient(apiKey, baseUrl) { if (!apiKey) { throw new Error('Perplexity API key is required.'); } - // Create and return a new instance directly return createPerplexity({ - apiKey: apiKey + apiKey: apiKey, + ...(baseUrl && { baseURL: baseUrl }) }); } @@ -31,6 +31,7 @@ function getClient(apiKey) { * @param {Array} params.messages - The messages array. * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. + * @param {string} [params.baseUrl] - Base URL for the Perplexity API. * @returns {Promise} The generated text content. * @throws {Error} If the API call fails. */ @@ -39,11 +40,12 @@ export async function generatePerplexityText({ modelId, messages, maxTokens, - temperature + temperature, + baseUrl }) { log('debug', `Generating Perplexity text with model: ${modelId}`); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); const result = await generateText({ model: client(modelId), messages: messages, @@ -77,6 +79,7 @@ export async function generatePerplexityText({ * @param {Array} params.messages - The messages array. * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. + * @param {string} [params.baseUrl] - Base URL for the Perplexity API. * @returns {Promise} The full stream result object from the Vercel AI SDK. * @throws {Error} If the API call fails to initiate the stream. */ @@ -85,11 +88,12 @@ export async function streamPerplexityText({ modelId, messages, maxTokens, - temperature + temperature, + baseUrl }) { log('debug', `Streaming Perplexity text with model: ${modelId}`); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); const stream = await streamText({ model: client(modelId), messages: messages, @@ -119,6 +123,7 @@ export async function streamPerplexityText({ * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.maxRetries] - Max retries for validation/generation. + * @param {string} [params.baseUrl] - Base URL for the Perplexity API. * @returns {Promise} The generated object matching the schema. * @throws {Error} If generation or validation fails or is unsupported. 
*/ @@ -130,7 +135,8 @@ export async function generatePerplexityObject({ objectName = 'generated_object', maxTokens, temperature, - maxRetries = 1 // Lower retries as support might be limited + maxRetries = 1, + baseUrl }) { log( 'debug', @@ -141,8 +147,7 @@ export async function generatePerplexityObject({ 'generateObject support for Perplexity might be limited or experimental.' ); try { - const client = getClient(apiKey); - // Attempt using generateObject, but be prepared for potential issues + const client = getClient(apiKey, baseUrl); const result = await generateObject({ model: client(modelId), schema: schema, diff --git a/src/ai-providers/xai.js b/src/ai-providers/xai.js index 94a58ef0..5969060b 100644 --- a/src/ai-providers/xai.js +++ b/src/ai-providers/xai.js @@ -9,14 +9,13 @@ import { generateText, streamText, generateObject } from 'ai'; // Only import wh import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible // --- Client Instantiation --- -function getClient(apiKey) { +function getClient(apiKey, baseUrl) { if (!apiKey) { throw new Error('xAI API key is required.'); } - // Create and return a new instance directly return createXai({ - apiKey: apiKey - // Add baseURL or other options if needed later + apiKey: apiKey, + ...(baseUrl && { baseURL: baseUrl }) }); } @@ -31,6 +30,7 @@ function getClient(apiKey) { * @param {Array} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]). * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. + * @param {string} [params.baseUrl] - The base URL for the xAI API. * @returns {Promise} The generated text content and usage. * @throws {Error} If the API call fails. */ @@ -39,13 +39,14 @@ export async function generateXaiText({ modelId, messages, maxTokens, - temperature + temperature, + baseUrl }) { log('debug', `Generating xAI text with model: ${modelId}`); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); const result = await generateText({ - model: client(modelId), // Correct model invocation + model: client(modelId), messages: messages, maxTokens: maxTokens, temperature: temperature @@ -77,6 +78,7 @@ export async function generateXaiText({ * @param {Array} params.messages - The messages array. * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. + * @param {string} [params.baseUrl] - The base URL for the xAI API. * @returns {Promise} The full stream result object from the Vercel AI SDK. * @throws {Error} If the API call fails to initiate the stream. */ @@ -85,18 +87,19 @@ export async function streamXaiText({ modelId, messages, maxTokens, - temperature + temperature, + baseUrl }) { log('debug', `Streaming xAI text with model: ${modelId}`); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); const stream = await streamText({ - model: client(modelId), // Correct model invocation + model: client(modelId), messages: messages, maxTokens: maxTokens, temperature: temperature }); - return stream; // Return the full stream object + return stream; } catch (error) { log('error', `xAI streamText failed: ${error.message}`, error.stack); throw error; @@ -117,6 +120,7 @@ export async function streamXaiText({ * @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.temperature] - Temperature for generation. 
* @param {number} [params.maxRetries] - Max retries for validation/generation. + * @param {string} [params.baseUrl] - The base URL for the xAI API. * @returns {Promise} The generated object matching the schema and its usage. * @throws {Error} If generation or validation fails. */ @@ -128,16 +132,17 @@ export async function generateXaiObject({ objectName = 'generated_xai_object', maxTokens, temperature, - maxRetries = 3 + maxRetries = 3, + baseUrl }) { log( - 'warn', // Log warning as this is likely unsupported + 'warn', `Attempting to generate xAI object ('${objectName}') with model: ${modelId}. This may not be supported by the provider.` ); try { - const client = getClient(apiKey); + const client = getClient(apiKey, baseUrl); const result = await generateObject({ - model: client(modelId), // Correct model invocation + model: client(modelId), // Note: mode might need adjustment if xAI ever supports object generation differently mode: 'tool', schema: schema, @@ -168,6 +173,6 @@ export async function generateXaiObject({ 'error', `xAI generateObject ('${objectName}') failed: ${error.message}. (Likely unsupported by provider)` ); - throw error; // Re-throw the error + throw error; } } diff --git a/src/constants/task-status.js b/src/constants/task-status.js new file mode 100644 index 00000000..ebad5a16 --- /dev/null +++ b/src/constants/task-status.js @@ -0,0 +1,32 @@ +/** + * @typedef {'pending' | 'done' | 'in-progress' | 'review' | 'deferred' | 'cancelled'} TaskStatus + */ + +/** + * Task status options list + * @type {TaskStatus[]} + * @description Defines possible task statuses: + * - pending: Task waiting to start + * - done: Task completed + * - in-progress: Task in progress + * - review: Task completed and waiting for review + * - deferred: Task postponed or paused + * - cancelled: Task cancelled and will not be completed + */ +export const TASK_STATUS_OPTIONS = [ + 'pending', + 'done', + 'in-progress', + 'review', + 'deferred', + 'cancelled' +]; + +/** + * Check if a given status is a valid task status + * @param {string} status - The status to check + * @returns {boolean} True if the status is valid, false otherwise + */ +export function isValidTaskStatus(status) { + return TASK_STATUS_OPTIONS.includes(status); +} diff --git a/src/utils/getVersion.js b/src/utils/getVersion.js new file mode 100644 index 00000000..55a64f40 --- /dev/null +++ b/src/utils/getVersion.js @@ -0,0 +1,35 @@ +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; +import { log } from '../../scripts/modules/utils.js'; + +/** + * Reads the version from the nearest package.json relative to this file. + * Returns 'unknown' if not found or on error. + * @returns {string} The version string or 'unknown'. 
+ */ +export function getTaskMasterVersion() { + let version = 'unknown'; + try { + // Get the directory of the current module (getPackageVersion.js) + const currentModuleFilename = fileURLToPath(import.meta.url); + const currentModuleDirname = path.dirname(currentModuleFilename); + // Construct the path to package.json relative to this file (../../package.json) + const packageJsonPath = path.join( + currentModuleDirname, + '..', + '..', + 'package.json' + ); + + if (fs.existsSync(packageJsonPath)) { + const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8'); + const packageJson = JSON.parse(packageJsonContent); + version = packageJson.version; + } + } catch (error) { + // Silently fall back to default version + log('warn', 'Could not read own package.json for version info.', error); + } + return version; +} diff --git a/tests/setup.js b/tests/setup.js index 8dedeacd..81e11109 100644 --- a/tests/setup.js +++ b/tests/setup.js @@ -9,7 +9,7 @@ process.env.MODEL = 'sonar-pro'; process.env.MAX_TOKENS = '64000'; process.env.TEMPERATURE = '0.2'; process.env.DEBUG = 'false'; -process.env.LOG_LEVEL = 'error'; // Set to error to reduce noise in tests +process.env.TASKMASTER_LOG_LEVEL = 'error'; // Set to error to reduce noise in tests process.env.DEFAULT_SUBTASKS = '5'; process.env.DEFAULT_PRIORITY = 'medium'; process.env.PROJECT_NAME = 'Test Project'; diff --git a/tests/unit/ai-services-unified.test.js b/tests/unit/ai-services-unified.test.js index d34806a7..4cd2b4df 100644 --- a/tests/unit/ai-services-unified.test.js +++ b/tests/unit/ai-services-unified.test.js @@ -32,6 +32,7 @@ const mockModelMap = { ] // Add other providers/models if needed for specific tests }; +const mockGetBaseUrlForRole = jest.fn(); jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({ getMainProvider: mockGetMainProvider, @@ -43,7 +44,8 @@ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({ getParametersForRole: mockGetParametersForRole, getUserId: mockGetUserId, getDebugFlag: mockGetDebugFlag, - MODEL_MAP: mockModelMap + MODEL_MAP: mockModelMap, + getBaseUrlForRole: mockGetBaseUrlForRole })); // Mock AI Provider Modules diff --git a/tests/unit/task-manager.test.js b/tests/unit/task-manager.test.js index fcba1be3..ec4725e5 100644 --- a/tests/unit/task-manager.test.js +++ b/tests/unit/task-manager.test.js @@ -199,6 +199,12 @@ const testSetTaskStatus = (tasksData, taskIdInput, newStatus) => { // Simplified version of updateSingleTaskStatus for testing const testUpdateSingleTaskStatus = (tasksData, taskIdInput, newStatus) => { + if (!isValidTaskStatus(newStatus)) { + throw new Error( + `Error: Invalid status value: ${newStatus}. 
Use one of: ${TASK_STATUS_OPTIONS.join(', ')}` + ); + } + // Check if it's a subtask (e.g., "1.2") if (taskIdInput.includes('.')) { const [parentId, subtaskId] = taskIdInput @@ -329,6 +335,10 @@ const testAddTask = ( import * as taskManager from '../../scripts/modules/task-manager.js'; import { sampleClaudeResponse } from '../fixtures/sample-claude-response.js'; import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js'; +import { + isValidTaskStatus, + TASK_STATUS_OPTIONS +} from '../../src/constants/task-status.js'; // Destructure the required functions for convenience const { findNextTask, generateTaskFiles, clearSubtasks, updateTaskById } = @@ -1165,6 +1175,16 @@ describe('Task Manager Module', () => { expect(testTasksData.tasks[1].status).toBe('done'); }); + test('should throw error for invalid status', async () => { + // Arrange + const testTasksData = JSON.parse(JSON.stringify(sampleTasks)); + + // Assert + expect(() => + testUpdateSingleTaskStatus(testTasksData, '2', 'Done') + ).toThrow(/Error: Invalid status value: Done./); + }); + test('should update subtask status', async () => { // Arrange const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
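Taken together, `src/constants/task-status.js` now gives the CLI, the MCP tool schema, and the task manager a single source of truth for valid statuses. A minimal usage sketch of the new helpers (assuming a standalone script at the repository root; the guard mirrors the checks added in `commands.js` and `set-task-status.js`):

```js
// status-guard-example.js (illustrative only)
import {
	isValidTaskStatus,
	TASK_STATUS_OPTIONS
} from './src/constants/task-status.js';

function assertValidStatus(status) {
	// Same error shape as the validation added in set-task-status.js
	if (!isValidTaskStatus(status)) {
		throw new Error(
			`Error: Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
		);
	}
	return status;
}

assertValidStatus('in-progress'); // ok
assertValidStatus('Done'); // throws: statuses are case-sensitive ('done' is the valid form)
```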