Merge remote-tracking branch 'origin/next' into telemetry
.changeset/floppy-plants-marry.md (new file, 9 lines)
@@ -0,0 +1,9 @@
+---
+'task-master-ai': patch
+---
+
+Fix CLI --force flag for parse-prd command
+
+Previously, the --force flag was not respected when running `parse-prd`, causing the command to prompt for confirmation or fail even when --force was provided. This patch ensures that the flag is correctly passed and handled, allowing users to overwrite existing tasks.json files as intended.
+
+- Fixes #477
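With this patch, `parse-prd` overwrites an existing `tasks.json` without prompting when the flag is passed, e.g. `task-master parse-prd scripts/prd.txt --force` (the PRD path here is illustrative, not taken from this commit).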
.changeset/forty-plums-stay.md (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+'task-master-ai': minor
+---
+
+.taskmasterconfig now supports a baseUrl field per model role (main, research, fallback), allowing endpoint overrides for any provider.
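For illustration, a role entry with the new field looks like the example the docs hunk later in this diff adds (the endpoint value is just the provider's default; any compatible URL works):

```json
{
	"main": {
		"provider": "anthropic",
		"modelId": "claude-3-7-sonnet-20250219",
		"maxTokens": 64000,
		"temperature": 0.2,
		"baseUrl": "https://api.anthropic.com/v1"
	}
}
```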
.changeset/many-wasps-sell.md (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+'task-master-ai': patch
+---
+
+Task Master no longer tells you to update when you're already up to date
.changeset/pre.json
@@ -1,30 +1,12 @@
 {
-	"mode": "pre",
+	"mode": "exit",
 	"tag": "rc",
 	"initialVersions": {
-		"task-master-ai": "0.13.1"
+		"task-master-ai": "0.13.2"
 	},
 	"changesets": [
 		"beige-doodles-type",
-		"beige-rats-accept",
-		"blue-spies-kick",
-		"cuddly-zebras-matter",
-		"curvy-candies-eat",
-		"easy-toys-wash",
-		"every-stars-sell",
-		"fine-monkeys-eat",
-		"fine-signs-add",
-		"gentle-views-jump",
-		"mighty-mirrors-watch",
-		"neat-donkeys-shave",
-		"nine-rocks-sink",
-		"ninety-ghosts-relax",
-		"ninety-wombats-pull",
-		"public-cooks-fetch",
 		"red-oranges-attend",
-		"red-suns-wash",
-		"tricky-papayas-hang",
-		"violet-papayas-see",
-		"violet-parrots-march"
+		"red-suns-wash"
 	]
 }
.changeset/sharp-dingos-melt.md (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+'task-master-ai': patch
+---
+
+Fix the error handling of task status settings
.changeset/slow-singers-swim.md (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+'task-master-ai': patch
+---
+
+Fix for issue #409 LOG_LEVEL Pydantic validation error
.changeset/soft-zoos-flow.md (new file, 7 lines)
@@ -0,0 +1,7 @@
+---
+'task-master-ai': patch
+---
+
+Fix initial .env.example to work out of the box
+
+- Closes #419
.changeset/ten-ways-mate.md (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+'task-master-ai': patch
+---
+
+Fix default fallback model and maxTokens in Taskmaster initialization
.changeset/tricky-wombats-spend.md (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+'task-master-ai': patch
+---
+
+Fix bug when updating tasks on the MCP server (#412)
.changeset/wide-eyes-relax.md (new file, 11 lines)
@@ -0,0 +1,11 @@
+---
+'task-master-ai': patch
+---
+
+Fix duplicate output on CLI help screen
+
+- Prevent the Task Master CLI from printing the help screen more than once when using `-h` or `--help`.
+- Removed redundant manual event handlers and guards for help output; now only the Commander `.helpInformation` override is used for custom help.
+- Simplified logic so that help is only shown once for both "no arguments" and help flag flows.
+- Ensures a clean, branded help experience with no repeated content.
+- Fixes #339
@@ -116,7 +116,7 @@ Taskmaster configuration is managed through two main mechanisms:
 * For MCP/Cursor integration, configure these keys in the `env` section of `.cursor/mcp.json`.
 * Available keys/variables: See `assets/env.example` or the Configuration section in the command reference (previously linked to `taskmaster.mdc`).
 
-**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool.
+**Important:** Non-API key settings (like model selections, `MAX_TOKENS`, `TASKMASTER_LOG_LEVEL`) are **no longer configured via environment variables**. Use the `task-master models` command (or `--setup` for interactive configuration) or the `models` MCP tool.
 **If AI commands FAIL in MCP** verify that the API key for the selected provider is present in the `env` section of `.cursor/mcp.json`.
 **If AI commands FAIL in CLI** verify that the API key for the selected provider is present in the `.env` file in the root of the project.
 
.github/workflows/pre-release.yml (vendored, 23 lines changed)
@@ -2,9 +2,6 @@ name: Pre-Release (RC)
 
 on:
   workflow_dispatch: # Allows manual triggering from GitHub UI/API
-  push:
-    branches:
-      - 'next'
 
 concurrency: pre-release-${{ github.ref }}
 
@@ -41,12 +38,10 @@ jobs:
           npx changeset pre enter rc
 
       - name: Version RC packages
-        run: |
-          git config user.name "GitHub Actions"
-          git config user.email "github-actions@example.com"
-          npx changeset version
-          git add .
-          git commit -m "chore: rc version bump" || echo "No changes to commit"
+        run: npx changeset version
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
 
       - name: Create Release Candidate Pull Request or Publish Release Candidate to npm
         uses: changesets/action@v1
@@ -55,3 +50,13 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
+
+      - name: Exit RC mode
+        run: npx changeset pre exit
+
+      - name: Commit & Push changes
+        uses: actions-js/push@master
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          branch: ${{ github.ref }}
+          message: 'chore: rc version bump'
@@ -14,8 +14,8 @@
 	},
 	"fallback": {
 		"provider": "anthropic",
-		"modelId": "claude-3.5-sonnet-20240620",
-		"maxTokens": 120000,
+		"modelId": "claude-3-5-sonnet-20240620",
+		"maxTokens": 8192,
 		"temperature": 0.1
 	}
 },
@@ -198,7 +198,7 @@ alwaysApply: true
 - **MAX_TOKENS** (Default: `"4000"`): Maximum tokens for responses (Example: `MAX_TOKENS=8000`)
 - **TEMPERATURE** (Default: `"0.7"`): Temperature for model responses (Example: `TEMPERATURE=0.5`)
 - **DEBUG** (Default: `"false"`): Enable debug logging (Example: `DEBUG=true`)
-- **LOG_LEVEL** (Default: `"info"`): Console output level (Example: `LOG_LEVEL=debug`)
+- **TASKMASTER_LOG_LEVEL** (Default: `"info"`): Console output level (Example: `TASKMASTER_LOG_LEVEL=debug`)
 - **DEFAULT_SUBTASKS** (Default: `"3"`): Default subtask count (Example: `DEFAULT_SUBTASKS=5`)
 - **DEFAULT_PRIORITY** (Default: `"medium"`): Default priority (Example: `DEFAULT_PRIORITY=high`)
 - **PROJECT_NAME** (Default: `"MCP SaaS MVP"`): Project name in metadata (Example: `PROJECT_NAME=My Awesome Project`)
@@ -1,8 +1,8 @@
 # API Keys (Required to enable respective provider)
-ANTHROPIC_API_KEY=your_anthropic_api_key_here # Required: Format: sk-ant-api03-...
-PERPLEXITY_API_KEY=your_perplexity_api_key_here # Optional: Format: pplx-...
-OPENAI_API_KEY=your_openai_api_key_here # Optional, for OpenAI/OpenRouter models. Format: sk-proj-...
-GOOGLE_API_KEY=your_google_api_key_here # Optional, for Google Gemini models.
-MISTRAL_API_KEY=your_mistral_key_here # Optional, for Mistral AI models.
-XAI_API_KEY=YOUR_XAI_KEY_HERE # Optional, for xAI AI models.
-AZURE_OPENAI_API_KEY=your_azure_key_here # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
+ANTHROPIC_API_KEY="your_anthropic_api_key_here" # Required: Format: sk-ant-api03-...
+PERPLEXITY_API_KEY="your_perplexity_api_key_here" # Optional: Format: pplx-...
+OPENAI_API_KEY="your_openai_api_key_here" # Optional, for OpenAI/OpenRouter models. Format: sk-proj-...
+GOOGLE_API_KEY="your_google_api_key_here" # Optional, for Google Gemini models.
+MISTRAL_API_KEY="your_mistral_key_here" # Optional, for Mistral AI models.
+XAI_API_KEY="YOUR_XAI_KEY_HERE" # Optional, for xAI AI models.
+AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
@@ -31,7 +31,7 @@ Task Master configuration is now managed through two primary methods:
 - Create a `.env` file in your project root for CLI usage.
 - See `assets/env.example` for required key names.
 
-**Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead.
+**Important:** Settings like `MODEL`, `MAX_TOKENS`, `TEMPERATURE`, `TASKMASTER_LOG_LEVEL`, etc., are **no longer set via `.env`**. Use `task-master models --setup` instead.
 
 ## How It Works
 
@@ -200,7 +200,7 @@ Notes:
 
 ## Logging
 
-The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:
+The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable:
 
 - `debug`: Detailed information, typically useful for troubleshooting
 - `info`: Confirmation that things are working as expected (default)
@@ -15,13 +15,15 @@ Taskmaster uses two primary methods for configuration:
 		"provider": "anthropic",
 		"modelId": "claude-3-7-sonnet-20250219",
 		"maxTokens": 64000,
-		"temperature": 0.2
+		"temperature": 0.2,
+		"baseUrl": "https://api.anthropic.com/v1"
 	},
 	"research": {
 		"provider": "perplexity",
 		"modelId": "sonar-pro",
 		"maxTokens": 8700,
-		"temperature": 0.1
+		"temperature": 0.1,
+		"baseUrl": "https://api.perplexity.ai/v1"
 	},
 	"fallback": {
 		"provider": "anthropic",
@@ -56,8 +58,9 @@ Taskmaster uses two primary methods for configuration:
 - `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`).
 - `OPENROUTER_API_KEY`: Your OpenRouter API key.
 - `XAI_API_KEY`: Your X-AI API key.
-- **Optional Endpoint Overrides (in .taskmasterconfig):**
-  - `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key.
+- **Optional Endpoint Overrides:**
+  - **Per-role `baseUrl` in `.taskmasterconfig`:** You can add a `baseUrl` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
+  - `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseUrl` for the Azure model role).
   - `OLLAMA_BASE_URL`: Override the default Ollama API URL (Default: `http://localhost:11434/api`).
 
 **Important:** Settings like model ID selections (`main`, `research`, `fallback`), `maxTokens`, `temperature`, `logLevel`, `defaultSubtasks`, `defaultPriority`, and `projectName` are **managed in `.taskmasterconfig`**, not environment variables.
@@ -6,6 +6,10 @@
 import path from 'path';
 import { updateTasks } from '../../../../scripts/modules/task-manager.js';
 import { createLogWrapper } from '../../tools/utils.js';
+import {
+	enableSilentMode,
+	disableSilentMode
+} from '../../../../scripts/modules/utils.js';
 
 /**
  * Direct function wrapper for updating tasks based on new context.
@@ -11,6 +11,7 @@ import {
 } from './utils.js';
 import { setTaskStatusDirect } from '../core/task-master-core.js';
 import { findTasksJsonPath } from '../core/utils/path-utils.js';
+import { TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js';
 
 /**
  * Register the setTaskStatus tool with the MCP server
@@ -27,7 +28,7 @@ export function registerSetTaskStatusTool(server) {
 				"Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated to update multiple tasks/subtasks at once."
 			),
 		status: z
-			.string()
+			.enum(TASK_STATUS_OPTIONS)
 			.describe(
 				"New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'."
 			),
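The new `src/constants/task-status.js` module itself is not part of this diff; a minimal sketch consistent with the imports above (the exact status list is assumed from the tool's `describe` string, not from the committed file):

```js
// src/constants/task-status.js (sketch, not the committed file)
export const TASK_STATUS_OPTIONS = [
	'pending',
	'done',
	'in-progress',
	'review',
	'deferred',
	'cancelled'
];

// Returns true when the given string is one of the known statuses.
export function isValidTaskStatus(status) {
	return TASK_STATUS_OPTIONS.includes(status);
}
```

Centralizing the list lets the CLI option help, the MCP tool's `z.enum`, and the validation guards below all stay in sync.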
package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
 {
 	"name": "task-master-ai",
-	"version": "0.13.2-rc.1",
+	"version": "0.13.2",
 	"lockfileVersion": 3,
 	"requires": true,
 	"packages": {
 		"": {
 			"name": "task-master-ai",
-			"version": "0.13.2-rc.1",
+			"version": "0.13.2",
 			"license": "MIT WITH Commons-Clause",
 			"dependencies": {
 				"@ai-sdk/anthropic": "^1.2.10",

package.json
@@ -1,6 +1,6 @@
 {
 	"name": "task-master-ai",
-	"version": "0.13.2-rc.1",
+	"version": "0.13.2",
 	"description": "A task management system for ambitious AI-driven development that doesn't overwhelm and confuse Cursor.",
 	"main": "index.js",
 	"type": "module",
@@ -32,7 +32,7 @@ The script can be configured through environment variables in a `.env` file at t
 - `PERPLEXITY_API_KEY`: Your Perplexity API key for research-backed subtask generation
 - `PERPLEXITY_MODEL`: Specify which Perplexity model to use (default: "sonar-medium-online")
 - `DEBUG`: Enable debug logging (default: false)
-- `LOG_LEVEL`: Log level - debug, info, warn, error (default: info)
+- `TASKMASTER_LOG_LEVEL`: Log level - debug, info, warn, error (default: info)
 - `DEFAULT_SUBTASKS`: Default number of subtasks when expanding (default: 3)
 - `DEFAULT_PRIORITY`: Default priority for generated tasks (default: medium)
 - `PROJECT_NAME`: Override default project name in tasks.json
@@ -225,7 +225,7 @@ To use the Perplexity integration:
 
 ## Logging
 
-The script supports different logging levels controlled by the `LOG_LEVEL` environment variable:
+The script supports different logging levels controlled by the `TASKMASTER_LOG_LEVEL` environment variable:
 
 - `debug`: Detailed information, typically useful for troubleshooting
 - `info`: Confirmation that things are working as expected (default)
@@ -38,10 +38,10 @@ const LOG_LEVELS = {
 	success: 4
 };
 
-// Get log level from environment or default to info
-const LOG_LEVEL = process.env.LOG_LEVEL
-	? LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()]
-	: LOG_LEVELS.info;
+// Determine log level from environment variable or default to 'info'
+const LOG_LEVEL = process.env.TASKMASTER_LOG_LEVEL
+	? LOG_LEVELS[process.env.TASKMASTER_LOG_LEVEL.toLowerCase()]
+	: LOG_LEVELS.info; // Default to info
 
 // Create a color gradient for the banner
 const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);
@@ -17,7 +17,8 @@ import {
 	getParametersForRole,
 	getUserId,
 	MODEL_MAP,
-	getDebugFlag
+	getDebugFlag,
+	getBaseUrlForRole
 } from './config-manager.js';
 import { log, resolveEnvVariable, isSilentMode } from './utils.js';
 
@@ -339,9 +340,15 @@ async function _unifiedServiceRunner(serviceType, params) {
 			'AI service call failed for all configured roles.';
 
 	for (const currentRole of sequence) {
-		let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn;
-		let providerResponse;
-		let telemetryData = null;
+		let providerName,
+			modelId,
+			apiKey,
+			roleParams,
+			providerFnSet,
+			providerApiFn,
+			baseUrl,
+			providerResponse,
+			telemetryData = null;
 
 		try {
 			log('info', `New AI service call with role: ${currentRole}`);
@@ -382,6 +389,7 @@ async function _unifiedServiceRunner(serviceType, params) {
 
 			// Pass effectiveProjectRoot to getParametersForRole
 			roleParams = getParametersForRole(currentRole, effectiveProjectRoot);
+			baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot);
 
 			// 2. Get Provider Function Set
 			providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()];
@@ -458,6 +466,7 @@ async function _unifiedServiceRunner(serviceType, params) {
 				maxTokens: roleParams.maxTokens,
 				temperature: roleParams.temperature,
 				messages,
+				baseUrl,
 				...(serviceType === 'generateObject' && { schema, objectName }),
 				...restApiParams
 			};
@@ -74,7 +74,11 @@ import {
 	getApiKeyStatusReport
 } from './task-manager/models.js';
 import { findProjectRoot } from './utils.js';
+import {
+	isValidTaskStatus,
+	TASK_STATUS_OPTIONS
+} from '../../src/constants/task-status.js';
+import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
 /**
  * Runs the interactive setup process for model configuration.
  * @param {string|null} projectRoot - The resolved project root directory.
@@ -487,11 +491,6 @@ function registerCommands(programInstance) {
 			process.exit(1);
 		});
 
-	// Default help
-	programInstance.on('--help', function () {
-		displayHelp();
-	});
-
 	// parse-prd command
 	programInstance
 		.command('parse-prd')
@@ -1039,7 +1038,7 @@ function registerCommands(programInstance) {
 		)
 		.option(
 			'-s, --status <status>',
-			'New status (todo, in-progress, review, done)'
+			`New status (one of: ${TASK_STATUS_OPTIONS.join(', ')})`
 		)
 		.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
 		.action(async (options) => {
@@ -1052,6 +1051,16 @@ function registerCommands(programInstance) {
 				process.exit(1);
 			}
 
+			if (!isValidTaskStatus(status)) {
+				console.error(
+					chalk.red(
+						`Error: Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
+					)
+				);
+
+				process.exit(1);
+			}
+
 			console.log(
 				chalk.blue(`Setting status of task(s) ${taskId} to: ${status}`)
 			);
@@ -1273,10 +1282,6 @@ function registerCommands(programInstance) {
 		.option(
 			'--details <details>',
 			'Implementation details (for manual task creation)'
 		)
-		.option(
-			'--test-strategy <testStrategy>',
-			'Test strategy (for manual task creation)'
-		)
 		.option(
 			'--dependencies <dependencies>',
 			'Comma-separated list of task IDs this task depends on'
@@ -1654,6 +1659,7 @@ function registerCommands(programInstance) {
 			}
 		} catch (error) {
 			console.error(chalk.red(`Error: ${error.message}`));
+			showAddSubtaskHelp();
 			process.exit(1);
 		}
 	})
@@ -2357,14 +2363,7 @@ function setupCLI() {
 			return 'unknown'; // Default fallback if package.json fails
 		})
 		.helpOption('-h, --help', 'Display help')
-		.addHelpCommand(false) // Disable default help command
-		.on('--help', () => {
-			displayHelp(); // Use your custom help display instead
-		})
-		.on('-h', () => {
-			displayHelp();
-			process.exit(0);
-		});
+		.addHelpCommand(false); // Disable default help command
 
 	// Modify the help option to use your custom display
 	programInstance.helpInformation = () => {
@@ -2384,28 +2383,7 @@ function setupCLI() {
 */
 async function checkForUpdate() {
 	// Get current version from package.json ONLY
-	let currentVersion = 'unknown'; // Initialize with a default
-	try {
-		// Try to get the version from the installed package (if applicable) or current dir
-		let packageJsonPath = path.join(
-			process.cwd(),
-			'node_modules',
-			'task-master-ai',
-			'package.json'
-		);
-		// Fallback to current directory package.json if not found in node_modules
-		if (!fs.existsSync(packageJsonPath)) {
-			packageJsonPath = path.join(process.cwd(), 'package.json');
-		}
-
-		if (fs.existsSync(packageJsonPath)) {
-			const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
-			currentVersion = packageJson.version;
-		}
-	} catch (error) {
-		// Silently fail and use default
-		log('debug', `Error reading current package version: ${error.message}`);
-	}
-
+	const currentVersion = getTaskMasterVersion();
 
 	return new Promise((resolve) => {
 		// Get the latest version from npm registry
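`getTaskMasterVersion` from `src/utils/getVersion.js` is new in this commit but its body is not shown in this diff; presumably it resolves the package's own `package.json` instead of walking `process.cwd()`, which is what made the old lookup report stale versions. A sketch of that idea:

```js
// src/utils/getVersion.js (sketch — the committed implementation may differ)
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';

export function getTaskMasterVersion() {
	try {
		// Resolve package.json relative to this module (src/utils/), not the caller's cwd.
		const here = path.dirname(fileURLToPath(import.meta.url));
		const pkgPath = path.join(here, '../../package.json');
		return JSON.parse(fs.readFileSync(pkgPath, 'utf8')).version;
	} catch {
		return 'unknown'; // Same fallback the old inline code used
	}
}
```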
@@ -687,6 +687,13 @@ function getAllProviders() {
 	return Object.keys(MODEL_MAP || {});
 }
 
+function getBaseUrlForRole(role, explicitRoot = null) {
+	const roleConfig = getModelConfigForRole(role, explicitRoot);
+	return roleConfig && typeof roleConfig.baseUrl === 'string'
+		? roleConfig.baseUrl
+		: undefined;
+}
+
 export {
 	// Core config access
 	getConfig,
@@ -714,6 +721,7 @@ export {
 	getFallbackModelId,
 	getFallbackMaxTokens,
 	getFallbackTemperature,
+	getBaseUrlForRole,
 
 	// Global setting getters (No env var overrides)
 	getLogLevel,
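Callers resolve the override alongside the other role parameters; returning `undefined` when no `baseUrl` is configured lets the provider modules fall back to their default endpoints via the `...(baseUrl && { baseURL: baseUrl })` spread seen later in this diff. A minimal usage sketch (`projectRoot` stands in for the resolved project root passed around elsewhere in this commit):

```js
import { getBaseUrlForRole } from './config-manager.js';

// undefined unless the 'main' role in .taskmasterconfig sets "baseUrl"
const baseUrl = getBaseUrlForRole('main', projectRoot);
```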
@@ -8,6 +8,10 @@ import { validateTaskDependencies } from '../dependency-manager.js';
 import { getDebugFlag } from '../config-manager.js';
 import updateSingleTaskStatus from './update-single-task-status.js';
 import generateTaskFiles from './generate-task-files.js';
+import {
+	isValidTaskStatus,
+	TASK_STATUS_OPTIONS
+} from '../../../src/constants/task-status.js';
 
 /**
  * Set the status of a task
@@ -19,6 +23,11 @@ import generateTaskFiles from './generate-task-files.js';
 */
 async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {
 	try {
+		if (!isValidTaskStatus(newStatus)) {
+			throw new Error(
+				`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
+			);
+		}
 		// Determine if we're in MCP mode by checking for mcpLog
 		const isMcpMode = !!options?.mcpLog;
 
@@ -1,6 +1,7 @@
 import chalk from 'chalk';
 
 import { log } from '../utils.js';
+import { isValidTaskStatus } from '../../../src/constants/task-status.js';
 
 /**
  * Update the status of a single task
@@ -17,6 +18,12 @@ async function updateSingleTaskStatus(
 	data,
 	showUi = true
 ) {
+	if (!isValidTaskStatus(newStatus)) {
+		throw new Error(
+			`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
+		);
+	}
+
 	// Check if it's a subtask (e.g., "1.2")
 	if (taskIdInput.includes('.')) {
 		const [parentId, subtaskId] = taskIdInput
@@ -16,10 +16,11 @@ import {
 	truncate,
 	isSilentMode
 } from './utils.js';
-import path from 'path';
 import fs from 'fs';
 import { findNextTask, analyzeTaskComplexity } from './task-manager.js';
 import { getProjectName, getDefaultSubtasks } from './config-manager.js';
+import { TASK_STATUS_OPTIONS } from '../../src/constants/task-status.js';
+import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
 
 // Create a color gradient for the banner
 const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);
@@ -46,17 +47,7 @@ function displayBanner() {
 	);
 
 	// Read version directly from package.json
-	let version = 'unknown'; // Initialize with a default
-	try {
-		const packageJsonPath = path.join(process.cwd(), 'package.json');
-		if (fs.existsSync(packageJsonPath)) {
-			const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
-			version = packageJson.version;
-		}
-	} catch (error) {
-		// Silently fall back to default version
-		log('warn', 'Could not read package.json for version info.');
-	}
+	const version = getTaskMasterVersion();
 
 	console.log(
 		boxen(
@@ -458,7 +449,7 @@ function displayHelp() {
 		{
 			name: 'set-status',
 			args: '--id=<id> --status=<status>',
-			desc: 'Update task status (done, pending, etc.)'
+			desc: `Update task status (${TASK_STATUS_OPTIONS.join(', ')})`
 		},
 		{
 			name: 'update',
@@ -809,12 +800,7 @@ async function displayNextTask(tasksPath) {
 			'padding-bottom': 0,
 			compact: true
 		},
-		chars: {
-			mid: '',
-			'left-mid': '',
-			'mid-mid': '',
-			'right-mid': ''
-		},
+		chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },
 		colWidths: [15, Math.min(75, process.stdout.columns - 20 || 60)],
 		wordWrap: true
 	});
@@ -905,12 +891,7 @@ async function displayNextTask(tasksPath) {
 			'padding-bottom': 0,
 			compact: true
 		},
-		chars: {
-			mid: '',
-			'left-mid': '',
-			'mid-mid': '',
-			'right-mid': ''
-		},
+		chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },
 		wordWrap: true
 	});
 
@@ -5,7 +5,7 @@
  * using the Vercel AI SDK.
  */
 import { createAnthropic } from '@ai-sdk/anthropic';
-import { generateText, streamText, generateObject, streamObject } from 'ai';
+import { generateText, streamText, generateObject } from 'ai';
 import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
 
 // TODO: Implement standardized functions for generateText, streamText, generateObject
@@ -17,7 +17,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils is acces
 // Remove the global variable and caching logic
 // let anthropicClient;
 
-function getClient(apiKey) {
+function getClient(apiKey, baseUrl) {
 	if (!apiKey) {
 		// In a real scenario, this would use the config resolver.
 		// Throwing error here if key isn't passed for simplicity.
@@ -30,14 +30,12 @@ function getClient(apiKey) {
 	// Create and return a new instance directly with standard version header
 	return createAnthropic({
 		apiKey: apiKey,
-		baseURL: 'https://api.anthropic.com/v1',
+		...(baseUrl && { baseURL: baseUrl }),
 		// Use standard version header instead of beta
 		headers: {
 			'anthropic-beta': 'output-128k-2025-02-19'
 		}
 	});
-	// }
-	// return anthropicClient;
 }
 
 // --- Standardized Service Function Implementations ---
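The `...(baseUrl && { baseURL: baseUrl })` idiom used here (and in the Google, OpenAI, and OpenRouter providers below) only adds the `baseURL` key when an override is present, so the SDK's own default endpoint applies otherwise. A standalone illustration of the pattern:

```js
// Truthy override: the spread contributes { baseURL: ... }
const withOverride = { apiKey: 'k', ...('https://example.test/v1' && { baseURL: 'https://example.test/v1' }) };
// -> { apiKey: 'k', baseURL: 'https://example.test/v1' }

// No override: spreading a falsy value into an object literal adds nothing
const withoutOverride = { apiKey: 'k', ...(undefined && { baseURL: undefined }) };
// -> { apiKey: 'k' }
```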
@@ -51,6 +49,7 @@ function getClient(apiKey) {
  * @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
+ * @param {string} [params.baseUrl] - The base URL for the Anthropic API.
  * @returns {Promise<object>} The generated text content and usage.
  * @throws {Error} If the API call fails.
  */
@@ -59,11 +58,12 @@ export async function generateAnthropicText({
 	modelId,
 	messages,
 	maxTokens,
-	temperature
+	temperature,
+	baseUrl
 }) {
 	log('debug', `Generating Anthropic text with model: ${modelId}`);
 	try {
-		const client = getClient(apiKey);
+		const client = getClient(apiKey, baseUrl);
 		const result = await generateText({
 			model: client(modelId),
 			messages: messages,
@@ -100,6 +100,7 @@ export async function generateAnthropicText({
  * @param {Array<object>} params.messages - The messages array.
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
+ * @param {string} [params.baseUrl] - The base URL for the Anthropic API.
  * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
  * @throws {Error} If the API call fails to initiate the stream.
  */
@@ -108,20 +109,20 @@ export async function streamAnthropicText({
 	modelId,
 	messages,
 	maxTokens,
-	temperature
+	temperature,
+	baseUrl
 }) {
 	log('debug', `Streaming Anthropic text with model: ${modelId}`);
 	try {
-		const client = getClient(apiKey);
+		const client = getClient(apiKey, baseUrl);
 
-		// --- DEBUG LOGGING --- >>
 		log(
 			'debug',
 			'[streamAnthropicText] Parameters received by streamText:',
 			JSON.stringify(
 				{
-					modelId: modelId, // Log modelId being used
-					messages: messages, // Log the messages array
+					modelId: modelId,
+					messages: messages,
 					maxTokens: maxTokens,
 					temperature: temperature
 				},
@@ -129,25 +130,19 @@ export async function streamAnthropicText({
 				2
 			)
 		);
-		// --- << DEBUG LOGGING ---
 
 		const stream = await streamText({
 			model: client(modelId),
 			messages: messages,
 			maxTokens: maxTokens,
 			temperature: temperature
-			// Beta header moved to client initialization
 			// TODO: Add other relevant parameters
 		});
 
 		// *** RETURN THE FULL STREAM OBJECT, NOT JUST stream.textStream ***
 		return stream;
 	} catch (error) {
-		log(
-			'error',
-			`Anthropic streamText failed: ${error.message}`,
-			error.stack // Log stack trace for more details
-		);
+		log('error', `Anthropic streamText failed: ${error.message}`, error.stack);
 		throw error;
 	}
 }
@@ -167,6 +162,7 @@ export async function streamAnthropicText({
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
  * @param {number} [params.maxRetries] - Max retries for validation/generation.
+ * @param {string} [params.baseUrl] - The base URL for the Anthropic API.
  * @returns {Promise<object>} The generated object matching the schema and usage.
  * @throws {Error} If generation or validation fails.
  */
@@ -178,24 +174,22 @@ export async function generateAnthropicObject({
 	objectName = 'generated_object',
 	maxTokens,
 	temperature,
-	maxRetries = 3
+	maxRetries = 3,
+	baseUrl
 }) {
 	log(
 		'debug',
 		`Generating Anthropic object ('${objectName}') with model: ${modelId}`
 	);
 	try {
-		const client = getClient(apiKey);
-
-		// Log basic debug info
+		const client = getClient(apiKey, baseUrl);
 		log(
 			'debug',
 			`Using maxTokens: ${maxTokens}, temperature: ${temperature}, model: ${modelId}`
 		);
 
 		const result = await generateObject({
 			model: client(modelId),
-			mode: 'tool', // Anthropic generally uses 'tool' mode for structured output
+			mode: 'tool',
 			schema: schema,
 			messages: messages,
 			tool: {
@@ -206,7 +200,6 @@ export async function generateAnthropicObject({
 			temperature: temperature,
 			maxRetries: maxRetries
 		});
 
 		log(
 			'debug',
 			`Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
@@ -220,7 +213,6 @@ export async function generateAnthropicObject({
 			}
 		};
 	} catch (error) {
-		// Simple error logging
 		log(
 			'error',
 			`Anthropic generateObject ('${objectName}') failed: ${error.message}`
@@ -12,6 +12,16 @@ import { log } from '../../scripts/modules/utils.js'; // Import logging utility
 const DEFAULT_MODEL = 'gemini-2.5-pro-exp-03-25'; // Or a suitable default
 const DEFAULT_TEMPERATURE = 0.2; // Or a suitable default
 
+function getClient(apiKey, baseUrl) {
+	if (!apiKey) {
+		throw new Error('Google API key is required.');
+	}
+	return createGoogleGenerativeAI({
+		apiKey: apiKey,
+		...(baseUrl && { baseURL: baseUrl })
+	});
+}
+
 /**
  * Generates text using a Google AI model.
  *
@@ -29,7 +39,8 @@ async function generateGoogleText({
 	modelId = DEFAULT_MODEL,
 	temperature = DEFAULT_TEMPERATURE,
 	messages,
-	maxTokens // Note: Vercel SDK might handle this differently, needs verification
+	maxTokens,
+	baseUrl
 }) {
 	if (!apiKey) {
 		throw new Error('Google API key is required.');
@@ -37,18 +48,13 @@ async function generateGoogleText({
 	log('info', `Generating text with Google model: ${modelId}`);
 
 	try {
-		// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
-		const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
-		// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
-		const model = googleProvider(modelId); // Correct model retrieval
-
-		// Construct payload suitable for Vercel SDK's generateText
-		// Note: The exact structure might depend on how messages are passed
+		const googleProvider = getClient(apiKey, baseUrl);
+		const model = googleProvider(modelId);
 		const result = await generateText({
-			model, // Pass the model instance
-			messages, // Pass the messages array directly
+			model,
+			messages,
 			temperature,
-			maxOutputTokens: maxTokens // Map to correct Vercel SDK param if available
+			maxOutputTokens: maxTokens
 		});
 
 		// Assuming result structure provides text directly or within a property
@@ -66,7 +72,7 @@ async function generateGoogleText({
 			'error',
 			`Error generating text with Google (${modelId}): ${error.message}`
 		);
-		throw error; // Re-throw for unified service handler
+		throw error;
 	}
 }
 
@@ -87,7 +93,8 @@ async function streamGoogleText({
 	modelId = DEFAULT_MODEL,
 	temperature = DEFAULT_TEMPERATURE,
 	messages,
-	maxTokens
+	maxTokens,
+	baseUrl
 }) {
 	if (!apiKey) {
 		throw new Error('Google API key is required.');
@@ -95,19 +102,15 @@ async function streamGoogleText({
 	log('info', `Streaming text with Google model: ${modelId}`);
 
 	try {
-		// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
-		const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
-		// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
-		const model = googleProvider(modelId); // Correct model retrieval
-
+		const googleProvider = getClient(apiKey, baseUrl);
+		const model = googleProvider(modelId);
 		const stream = await streamText({
-			model, // Pass the model instance
+			model,
 			messages,
 			temperature,
 			maxOutputTokens: maxTokens
 		});
-
-		return stream; // Return the stream directly
+		return stream;
 	} catch (error) {
 		log(
 			'error',
@@ -138,7 +141,8 @@ async function generateGoogleObject({
 	messages,
 	schema,
 	objectName, // Note: Vercel SDK might use this differently or not at all
-	maxTokens
+	maxTokens,
+	baseUrl
 }) {
 	if (!apiKey) {
 		throw new Error('Google API key is required.');
@@ -146,20 +150,14 @@ async function generateGoogleObject({
 	log('info', `Generating object with Google model: ${modelId}`);
 
 	try {
-		// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
-		const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
-		// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
-		const model = googleProvider(modelId); // Correct model retrieval
-
+		const googleProvider = getClient(apiKey, baseUrl);
+		const model = googleProvider(modelId);
 		const result = await generateObject({
-			model, // Pass the model instance
+			model,
 			schema,
 			messages,
 			temperature,
 			maxOutputTokens: maxTokens
-			// Note: 'objectName' or 'mode' might not be directly applicable here
-			// depending on how `@ai-sdk/google` handles `generateObject`.
-			// Check SDK docs if specific tool calling/JSON mode needs explicit setup.
 		});
 
 		// return object; // Return the parsed object
@@ -1,16 +1,26 @@
-import { createOpenAI, openai } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
-import { generateText, streamText, generateObject } from 'ai'; // Import necessary functions from 'ai'
+import { createOpenAI } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
+import { generateObject } from 'ai'; // Import necessary functions from 'ai'
 import { log } from '../../scripts/modules/utils.js';
 
+function getClient(apiKey, baseUrl) {
+	if (!apiKey) {
+		throw new Error('OpenAI API key is required.');
+	}
+	return createOpenAI({
+		apiKey: apiKey,
+		...(baseUrl && { baseURL: baseUrl })
+	});
+}
+
 /**
  * Generates text using OpenAI models via Vercel AI SDK.
  *
- * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
+ * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
  * @returns {Promise<object>} The generated text content and usage.
  * @throws {Error} If API call fails.
  */
 export async function generateOpenAIText(params) {
-	const { apiKey, modelId, messages, maxTokens, temperature } = params;
+	const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
 	log('debug', `generateOpenAIText called with model: ${modelId}`);
 
 	if (!apiKey) {
@@ -23,7 +33,7 @@ export async function generateOpenAIText(params) {
 		throw new Error('Invalid or empty messages array provided for OpenAI.');
 	}
 
-	const openaiClient = createOpenAI({ apiKey });
+	const openaiClient = getClient(apiKey, baseUrl);
 
 	try {
 		const result = await generateText({
@@ -67,12 +77,12 @@ export async function generateOpenAIText(params) {
 /**
  * Streams text using OpenAI models via Vercel AI SDK.
  *
- * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
+ * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
  * @returns {Promise<ReadableStream>} A readable stream of text deltas.
  * @throws {Error} If API call fails.
  */
 export async function streamOpenAIText(params) {
-	const { apiKey, modelId, messages, maxTokens, temperature } = params;
+	const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
 	log('debug', `streamOpenAIText called with model: ${modelId}`);
 
 	if (!apiKey) {
@@ -87,7 +97,7 @@ export async function streamOpenAIText(params) {
 		);
 	}
 
-	const openaiClient = createOpenAI({ apiKey });
+	const openaiClient = getClient(apiKey, baseUrl);
 
 	try {
 		const stream = await openaiClient.chat.stream(messages, {
@@ -116,7 +126,7 @@ export async function streamOpenAIText(params) {
|
|||||||
/**
|
/**
|
||||||
* Generates structured objects using OpenAI models via Vercel AI SDK.
|
* Generates structured objects using OpenAI models via Vercel AI SDK.
|
||||||
*
|
*
|
||||||
* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
|
* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature, baseUrl.
|
||||||
* @returns {Promise<object>} The generated object matching the schema and usage.
|
* @returns {Promise<object>} The generated object matching the schema and usage.
|
||||||
* @throws {Error} If API call fails or object generation fails.
|
* @throws {Error} If API call fails or object generation fails.
|
||||||
*/
|
*/
|
||||||
@@ -128,7 +138,8 @@ export async function generateOpenAIObject(params) {
|
|||||||
schema,
|
schema,
|
||||||
objectName,
|
objectName,
|
||||||
maxTokens,
|
maxTokens,
|
||||||
temperature
|
temperature,
|
||||||
|
baseUrl
|
||||||
} = params;
|
} = params;
|
||||||
log(
|
log(
|
||||||
'debug',
|
'debug',
|
||||||
@@ -144,7 +155,7 @@ export async function generateOpenAIObject(params) {
|
|||||||
if (!objectName)
|
if (!objectName)
|
||||||
throw new Error('Object name is required for OpenAI object generation.');
|
throw new Error('Object name is required for OpenAI object generation.');
|
||||||
|
|
||||||
const openaiClient = createOpenAI({ apiKey });
|
const openaiClient = getClient(apiKey, baseUrl);
|
||||||
|
|
||||||
try {
|
try {
|
||||||
const result = await generateObject({
|
const result = await generateObject({
|
||||||
|
|||||||
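For orientation, a minimal usage sketch of the new override (not part of this commit; the endpoint URL and model ID below are illustrative placeholders):

// Hypothetical caller — values are placeholders, not from this commit.
import { generateOpenAIText } from './src/ai-providers/openai.js';

const result = await generateOpenAIText({
	apiKey: process.env.OPENAI_API_KEY,
	modelId: 'gpt-4o', // placeholder model ID
	messages: [{ role: 'user', content: 'Say hello.' }],
	maxTokens: 100,
	temperature: 0.2,
	baseUrl: 'http://localhost:8080/v1' // omit to use the provider default
});
console.log(result);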
src/ai-providers/openrouter.js
@@ -2,6 +2,14 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
 import { generateText, streamText, generateObject } from 'ai';
 import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in scripts/modules
 
+function getClient(apiKey, baseUrl) {
+	if (!apiKey) throw new Error('OpenRouter API key is required.');
+	return createOpenRouter({
+		apiKey,
+		...(baseUrl && { baseURL: baseUrl })
+	});
+}
+
 /**
  * Generates text using an OpenRouter chat model.
  *
@@ -11,6 +19,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in
  * @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
  * @param {number} [params.maxTokens] - Maximum tokens to generate.
  * @param {number} [params.temperature] - Sampling temperature.
+ * @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
  * @returns {Promise<string>} The generated text content.
  * @throws {Error} If the API call fails.
  */
@@ -20,6 +29,7 @@ async function generateOpenRouterText({
 	messages,
 	maxTokens,
 	temperature,
+	baseUrl,
 	...rest // Capture any other Vercel AI SDK compatible parameters
 }) {
 	if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -28,7 +38,7 @@ async function generateOpenRouterText({
 		throw new Error('Messages array cannot be empty.');
 
 	try {
-		const openrouter = createOpenRouter({ apiKey });
+		const openrouter = getClient(apiKey, baseUrl);
 		const model = openrouter.chat(modelId); // Assuming chat model
 
 		// Capture the full result from generateText
@@ -85,6 +95,7 @@ async function generateOpenRouterText({
  * @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
  * @param {number} [params.maxTokens] - Maximum tokens to generate.
  * @param {number} [params.temperature] - Sampling temperature.
+ * @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
  * @returns {Promise<ReadableStream<string>>} A readable stream of text deltas.
  * @throws {Error} If the API call fails.
  */
@@ -94,6 +105,7 @@ async function streamOpenRouterText({
 	messages,
 	maxTokens,
 	temperature,
+	baseUrl,
 	...rest
 }) {
 	if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -102,7 +114,7 @@ async function streamOpenRouterText({
 		throw new Error('Messages array cannot be empty.');
 
 	try {
-		const openrouter = createOpenRouter({ apiKey });
+		const openrouter = getClient(apiKey, baseUrl);
 		const model = openrouter.chat(modelId);
 
 		// Directly return the stream from the Vercel AI SDK function
@@ -135,6 +147,7 @@ async function streamOpenRouterText({
  * @param {number} [params.maxRetries=3] - Max retries for object generation.
  * @param {number} [params.maxTokens] - Maximum tokens.
 * @param {number} [params.temperature] - Temperature.
+ * @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If the API call fails or validation fails.
 */
@@ -147,6 +160,7 @@ async function generateOpenRouterObject({
 	maxRetries = 3,
 	maxTokens,
 	temperature,
+	baseUrl,
 	...rest
 }) {
 	if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -156,7 +170,7 @@ async function generateOpenRouterObject({
 		throw new Error('Messages array cannot be empty.');
 
 	try {
-		const openrouter = createOpenRouter({ apiKey });
+		const openrouter = getClient(apiKey, baseUrl);
 		const model = openrouter.chat(modelId);
 
 		// Capture the full result from generateObject
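Each provider's getClient uses the same conditional-spread idiom, so the baseURL key only exists when an override is supplied and the SDK default applies otherwise. A standalone illustration:

// Demonstrates the `...(baseUrl && { baseURL: baseUrl })` pattern in isolation.
const clientOptions = (apiKey, baseUrl) => ({
	apiKey,
	...(baseUrl && { baseURL: baseUrl })
});

console.log(clientOptions('sk-test'));
// -> { apiKey: 'sk-test' }                         (no baseURL key at all)
console.log(clientOptions('sk-test', 'https://proxy.example/v1'));
// -> { apiKey: 'sk-test', baseURL: 'https://proxy.example/v1' }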
src/ai-providers/perplexity.js
@@ -10,13 +10,13 @@ import { log } from '../../scripts/modules/utils.js';
 
 // --- Client Instantiation ---
 // Similar to Anthropic, this expects the resolved API key to be passed in.
-function getClient(apiKey) {
+function getClient(apiKey, baseUrl) {
 	if (!apiKey) {
 		throw new Error('Perplexity API key is required.');
 	}
-	// Create and return a new instance directly
 	return createPerplexity({
-		apiKey: apiKey
+		apiKey: apiKey,
+		...(baseUrl && { baseURL: baseUrl })
 	});
 }
 
@@ -31,6 +31,7 @@ function getClient(apiKey) {
  * @param {Array<object>} params.messages - The messages array.
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
+ * @param {string} [params.baseUrl] - Base URL for the Perplexity API.
  * @returns {Promise<string>} The generated text content.
  * @throws {Error} If the API call fails.
  */
@@ -39,11 +40,12 @@ export async function generatePerplexityText({
 	modelId,
 	messages,
 	maxTokens,
-	temperature
+	temperature,
+	baseUrl
 }) {
 	log('debug', `Generating Perplexity text with model: ${modelId}`);
 	try {
-		const client = getClient(apiKey);
+		const client = getClient(apiKey, baseUrl);
 		const result = await generateText({
 			model: client(modelId),
 			messages: messages,
@@ -77,6 +79,7 @@ export async function generatePerplexityText({
  * @param {Array<object>} params.messages - The messages array.
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
+ * @param {string} [params.baseUrl] - Base URL for the Perplexity API.
  * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
  * @throws {Error} If the API call fails to initiate the stream.
  */
@@ -85,11 +88,12 @@ export async function streamPerplexityText({
 	modelId,
 	messages,
 	maxTokens,
-	temperature
+	temperature,
+	baseUrl
 }) {
 	log('debug', `Streaming Perplexity text with model: ${modelId}`);
 	try {
-		const client = getClient(apiKey);
+		const client = getClient(apiKey, baseUrl);
 		const stream = await streamText({
 			model: client(modelId),
 			messages: messages,
@@ -119,6 +123,7 @@ export async function streamPerplexityText({
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
  * @param {number} [params.maxRetries] - Max retries for validation/generation.
+ * @param {string} [params.baseUrl] - Base URL for the Perplexity API.
  * @returns {Promise<object>} The generated object matching the schema.
  * @throws {Error} If generation or validation fails or is unsupported.
  */
@@ -130,7 +135,8 @@ export async function generatePerplexityObject({
 	objectName = 'generated_object',
 	maxTokens,
 	temperature,
-	maxRetries = 1 // Lower retries as support might be limited
+	maxRetries = 1,
+	baseUrl
 }) {
 	log(
 		'debug',
@@ -141,8 +147,7 @@ export async function generatePerplexityObject({
 		'generateObject support for Perplexity might be limited or experimental.'
 	);
 	try {
-		const client = getClient(apiKey);
-		// Attempt using generateObject, but be prepared for potential issues
+		const client = getClient(apiKey, baseUrl);
 		const result = await generateObject({
 			model: client(modelId),
 			schema: schema,
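Because the module itself warns that generateObject support on Perplexity may be limited, a caller could wrap it defensively. A sketch under that assumption (the wrapper and its fallback shape are illustrative, not part of this commit):

// Illustrative guard around the exports above; not part of this commit.
import {
	generatePerplexityObject,
	generatePerplexityText
} from './src/ai-providers/perplexity.js';

async function safeGeneratePerplexityObject(params) {
	try {
		return await generatePerplexityObject(params);
	} catch (error) {
		// Structured output failed or is unsupported; fall back to plain text.
		console.warn(`generateObject failed (${error.message}); falling back to text.`);
		const text = await generatePerplexityText(params);
		return { object: null, rawText: text };
	}
}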
src/ai-providers/xai.js
@@ -9,14 +9,13 @@ import { generateText, streamText, generateObject } from 'ai'; // Only import wh
 import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
 
 // --- Client Instantiation ---
-function getClient(apiKey) {
+function getClient(apiKey, baseUrl) {
 	if (!apiKey) {
 		throw new Error('xAI API key is required.');
 	}
-	// Create and return a new instance directly
 	return createXai({
-		apiKey: apiKey
-		// Add baseURL or other options if needed later
+		apiKey: apiKey,
+		...(baseUrl && { baseURL: baseUrl })
 	});
 }
 
@@ -31,6 +30,7 @@ function getClient(apiKey) {
  * @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
+ * @param {string} [params.baseUrl] - The base URL for the xAI API.
  * @returns {Promise<object>} The generated text content and usage.
  * @throws {Error} If the API call fails.
  */
@@ -39,13 +39,14 @@ export async function generateXaiText({
 	modelId,
 	messages,
 	maxTokens,
-	temperature
+	temperature,
+	baseUrl
 }) {
 	log('debug', `Generating xAI text with model: ${modelId}`);
 	try {
-		const client = getClient(apiKey);
+		const client = getClient(apiKey, baseUrl);
 		const result = await generateText({
-			model: client(modelId), // Correct model invocation
+			model: client(modelId),
 			messages: messages,
 			maxTokens: maxTokens,
 			temperature: temperature
@@ -77,6 +78,7 @@ export async function generateXaiText({
  * @param {Array<object>} params.messages - The messages array.
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
+ * @param {string} [params.baseUrl] - The base URL for the xAI API.
  * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
  * @throws {Error} If the API call fails to initiate the stream.
  */
@@ -85,18 +87,19 @@ export async function streamXaiText({
 	modelId,
 	messages,
 	maxTokens,
-	temperature
+	temperature,
+	baseUrl
 }) {
 	log('debug', `Streaming xAI text with model: ${modelId}`);
 	try {
-		const client = getClient(apiKey);
+		const client = getClient(apiKey, baseUrl);
 		const stream = await streamText({
-			model: client(modelId), // Correct model invocation
+			model: client(modelId),
 			messages: messages,
 			maxTokens: maxTokens,
 			temperature: temperature
 		});
-		return stream; // Return the full stream object
+		return stream;
 	} catch (error) {
 		log('error', `xAI streamText failed: ${error.message}`, error.stack);
 		throw error;
@@ -117,6 +120,7 @@ export async function streamXaiText({
  * @param {number} [params.maxTokens] - Maximum tokens for the response.
  * @param {number} [params.temperature] - Temperature for generation.
  * @param {number} [params.maxRetries] - Max retries for validation/generation.
+ * @param {string} [params.baseUrl] - The base URL for the xAI API.
  * @returns {Promise<object>} The generated object matching the schema and its usage.
  * @throws {Error} If generation or validation fails.
  */
@@ -128,16 +132,17 @@ export async function generateXaiObject({
 	objectName = 'generated_xai_object',
 	maxTokens,
 	temperature,
-	maxRetries = 3
+	maxRetries = 3,
+	baseUrl
 }) {
 	log(
-		'warn', // Log warning as this is likely unsupported
+		'warn',
 		`Attempting to generate xAI object ('${objectName}') with model: ${modelId}. This may not be supported by the provider.`
 	);
 	try {
-		const client = getClient(apiKey);
+		const client = getClient(apiKey, baseUrl);
 		const result = await generateObject({
-			model: client(modelId), // Correct model invocation
+			model: client(modelId),
 			// Note: mode might need adjustment if xAI ever supports object generation differently
 			mode: 'tool',
 			schema: schema,
@@ -168,6 +173,6 @@ export async function generateXaiObject({
 			'error',
 			`xAI generateObject ('${objectName}') failed: ${error.message}. (Likely unsupported by provider)`
 		);
-		throw error; // Re-throw the error
+		throw error;
 	}
 }
32	src/constants/task-status.js	Normal file
@@ -0,0 +1,32 @@
/**
 * @typedef {'pending' | 'done' | 'in-progress' | 'review' | 'deferred' | 'cancelled'} TaskStatus
 */

/**
 * Task status options list
 * @type {TaskStatus[]}
 * @description Defines possible task statuses:
 * - pending: Task waiting to start
 * - done: Task completed
 * - in-progress: Task in progress
 * - review: Task completed and waiting for review
 * - deferred: Task postponed or paused
 * - cancelled: Task cancelled and will not be completed
 */
export const TASK_STATUS_OPTIONS = [
	'pending',
	'done',
	'in-progress',
	'review',
	'deferred',
	'cancelled'
];

/**
 * Check if a given status is a valid task status
 * @param {string} status - The status to check
 * @returns {boolean} True if the status is valid, false otherwise
 */
export function isValidTaskStatus(status) {
	return TASK_STATUS_OPTIONS.includes(status);
}
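A quick sketch of how these exports are meant to be consumed (import path assumes the repository root):

import {
	isValidTaskStatus,
	TASK_STATUS_OPTIONS
} from './src/constants/task-status.js';

const requested = 'Done'; // statuses are case-sensitive, so this fails validation
if (!isValidTaskStatus(requested)) {
	console.error(
		`Invalid status value: ${requested}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
	);
}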
35	src/utils/getVersion.js	Normal file
@@ -0,0 +1,35 @@
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import { log } from '../../scripts/modules/utils.js';

/**
 * Reads the version from the nearest package.json relative to this file.
 * Returns 'unknown' if not found or on error.
 * @returns {string} The version string or 'unknown'.
 */
export function getTaskMasterVersion() {
	let version = 'unknown';
	try {
		// Get the directory of the current module (getVersion.js)
		const currentModuleFilename = fileURLToPath(import.meta.url);
		const currentModuleDirname = path.dirname(currentModuleFilename);
		// Construct the path to package.json relative to this file (../../package.json)
		const packageJsonPath = path.join(
			currentModuleDirname,
			'..',
			'..',
			'package.json'
		);

		if (fs.existsSync(packageJsonPath)) {
			const packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8');
			const packageJson = JSON.parse(packageJsonContent);
			version = packageJson.version;
		}
	} catch (error) {
		// Fall back to 'unknown' and log a warning
		log('warn', 'Could not read own package.json for version info.', error);
	}
	return version;
}
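Usage is a single call; the returned string degrades to 'unknown' rather than throwing:

import { getTaskMasterVersion } from './src/utils/getVersion.js';

console.log(`task-master-ai ${getTaskMasterVersion()}`);
// e.g. "task-master-ai 0.13.2", or "task-master-ai unknown" on read failure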
@@ -9,7 +9,7 @@ process.env.MODEL = 'sonar-pro';
 process.env.MAX_TOKENS = '64000';
 process.env.TEMPERATURE = '0.2';
 process.env.DEBUG = 'false';
-process.env.LOG_LEVEL = 'error'; // Set to error to reduce noise in tests
+process.env.TASKMASTER_LOG_LEVEL = 'error'; // Set to error to reduce noise in tests
 process.env.DEFAULT_SUBTASKS = '5';
 process.env.DEFAULT_PRIORITY = 'medium';
 process.env.PROJECT_NAME = 'Test Project';
@@ -32,6 +32,7 @@ const mockModelMap = {
 	]
 	// Add other providers/models if needed for specific tests
 };
+const mockGetBaseUrlForRole = jest.fn();
 
 jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
 	getMainProvider: mockGetMainProvider,
@@ -43,7 +44,8 @@ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
 	getParametersForRole: mockGetParametersForRole,
 	getUserId: mockGetUserId,
 	getDebugFlag: mockGetDebugFlag,
-	MODEL_MAP: mockModelMap
+	MODEL_MAP: mockModelMap,
+	getBaseUrlForRole: mockGetBaseUrlForRole
 }));
 
 // Mock AI Provider Modules
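With the mock registered, a test can drive the new base-URL code path. A hypothetical example (the role name 'main' and the URL are assumptions for illustration, not from this commit):

// Hypothetical test body; role name and URL are illustrative only.
test('passes the configured base URL through to the provider', async () => {
	mockGetBaseUrlForRole.mockReturnValue('http://localhost:8080/v1');

	// ...invoke the code under test for the 'main' role here...

	expect(mockGetBaseUrlForRole).toHaveBeenCalledWith('main');
});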
@@ -199,6 +199,12 @@ const testSetTaskStatus = (tasksData, taskIdInput, newStatus) => {
 
 // Simplified version of updateSingleTaskStatus for testing
 const testUpdateSingleTaskStatus = (tasksData, taskIdInput, newStatus) => {
+	if (!isValidTaskStatus(newStatus)) {
+		throw new Error(
+			`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
+		);
+	}
+
 	// Check if it's a subtask (e.g., "1.2")
 	if (taskIdInput.includes('.')) {
 		const [parentId, subtaskId] = taskIdInput
@@ -329,6 +335,10 @@ const testAddTask = (
 import * as taskManager from '../../scripts/modules/task-manager.js';
 import { sampleClaudeResponse } from '../fixtures/sample-claude-response.js';
 import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';
+import {
+	isValidTaskStatus,
+	TASK_STATUS_OPTIONS
+} from '../../src/constants/task-status.js';
 
 // Destructure the required functions for convenience
 const { findNextTask, generateTaskFiles, clearSubtasks, updateTaskById } =
@@ -1165,6 +1175,16 @@ describe('Task Manager Module', () => {
 		expect(testTasksData.tasks[1].status).toBe('done');
 	});
 
+	test('should throw error for invalid status', async () => {
+		// Arrange
+		const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
+
+		// Assert
+		expect(() =>
+			testUpdateSingleTaskStatus(testTasksData, '2', 'Done')
+		).toThrow(/Error: Invalid status value: Done./);
+	});
+
 	test('should update subtask status', async () => {
 		// Arrange
 		const testTasksData = JSON.parse(JSON.stringify(sampleTasks));