Compare commits

3 Commits — fix/set-ta...

| Author | SHA1 | Date |
|---|---|---|
| ThomasMldr | e8f73d1bea | |
| | f9f3a24568 | |
| | b1f3796ec7 | |
@@ -1,5 +0,0 @@
----
-'task-master-ai': minor
----
-
-.taskmasterconfig now supports a baseUrl field per model role (main, research, fallback), allowing endpoint overrides for any provider.
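For reference, the per-role `baseUrl` described by this changeset is a plain property on each role object in `.taskmasterconfig`, as the docs/configuration hunk later in this comparison shows. A minimal sketch (the model IDs are taken from that hunk, the override URL is an illustrative placeholder, not a value from this PR):

```json
{
  "main": {
    "provider": "anthropic",
    "modelId": "claude-3-7-sonnet-20250219",
    "maxTokens": 64000,
    "temperature": 0.2,
    "baseUrl": "https://my-proxy.example.com/v1"
  },
  "research": {
    "provider": "perplexity",
    "modelId": "sonar-pro",
    "maxTokens": 8700,
    "temperature": 0.1
  }
}
```

When `baseUrl` is omitted, as in the `research` role above, the provider's standard endpoint is used.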
@@ -1,5 +0,0 @@
----
-'task-master-ai': patch
----
-
-Fix the error handling of task status settings
.env.example (38 changes)

@@ -1,9 +1,29 @@
-# API Keys (Required for using in any role i.e. main/research/fallback -- see `task-master models`)
-ANTHROPIC_API_KEY=YOUR_ANTHROPIC_KEY_HERE
-PERPLEXITY_API_KEY=YOUR_PERPLEXITY_KEY_HERE
-OPENAI_API_KEY=YOUR_OPENAI_KEY_HERE
-GOOGLE_API_KEY=YOUR_GOOGLE_KEY_HERE
-MISTRAL_API_KEY=YOUR_MISTRAL_KEY_HERE
-OPENROUTER_API_KEY=YOUR_OPENROUTER_KEY_HERE
-XAI_API_KEY=YOUR_XAI_KEY_HERE
-AZURE_OPENAI_API_KEY=YOUR_AZURE_KEY_HERE
+# API Keys (Required)
+ANTHROPIC_API_KEY="your_anthropic_api_key_here" # Format: sk-ant-api03-...
+PERPLEXITY_API_KEY="your_perplexity_api_key_here" # Format: pplx-...
+OPENAI_API_KEY="your_openai_api_key_here" # Format: sk-...
+GOOGLE_API_KEY="your_google_api_key_here" # Format: AIza...
+MISTRAL_API_KEY="your_mistral_api_key_here" # Format: ...
+OPENROUTER_API_KEY="your_openrouter_api_key_here" # Format: sk-or-...
+XAI_API_KEY="your_xai_api_key_here" # Format: ...
+AZURE_OPENAI_API_KEY="your_azure_key_here" # Format: ...
+
+# API Base URLs (Optional)
+ANTHROPIC_API_BASE_URL="optional_base_url_here" # Optional custom base URL for Anthropic API
+
+# Model Configuration
+MODEL="claude-3-7-sonnet-20250219" # Recommended models: claude-3-7-sonnet-20250219, claude-3-opus-20240229
+PERPLEXITY_MODEL="sonar-pro" # Perplexity model for research-backed subtasks
+MAX_TOKENS="64000" # Maximum tokens for model responses
+TEMPERATURE="0.2" # Temperature for model responses (0.0-1.0)
+
+# Logging Configuration
+DEBUG="false" # Enable debug logging (true/false)
+LOG_LEVEL="info" # Log level (debug, info, warn, error)
+
+# Task Generation Settings
+DEFAULT_SUBTASKS="5" # Default number of subtasks when expanding
+DEFAULT_PRIORITY="medium" # Default priority for generated tasks (high, medium, low)
+
+# Project Metadata (Optional)
+PROJECT_NAME="Your Project Name" # Override default project name in tasks.json
@@ -30,6 +30,7 @@ MCP (Model Control Protocol) provides the easiest way to get started with Task M
 "args": ["-y", "--package=task-master-ai", "task-master-ai"],
 "env": {
 "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
+"ANTHROPIC_API_BASE_URL": "YOUR_CUSTOM_BASE_URL_HERE (optional)",
 "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
 "OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE",
 "GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE",
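For context, the `env` fragment above sits inside an MCP server entry in the README. A hedged sketch of the surrounding block is shown below; only the `args` and `env` keys actually appear in this diff, so the server name and `command` value are assumptions:

```json
{
  "mcpServers": {
    "task-master-ai": {
      "command": "npx",
      "args": ["-y", "--package=task-master-ai", "task-master-ai"],
      "env": {
        "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
        "ANTHROPIC_API_BASE_URL": "YOUR_CUSTOM_BASE_URL_HERE (optional)",
        "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE"
      }
    }
  }
}
```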
@@ -3784,6 +3784,7 @@ In this tutorial, you'll learn how to build a LLM-powered chatbot client that co
 if (!ANTHROPIC_API_KEY) {
 throw new Error("ANTHROPIC_API_KEY is not set");
 }
+const ANTHROPIC_API_BASE_URL = process.env.ANTHROPIC_API_BASE_URL;
 
 class MCPClient {
 private mcp: Client;
@@ -3794,6 +3795,7 @@ In this tutorial, you'll learn how to build a LLM-powered chatbot client that co
 constructor() {
 this.anthropic = new Anthropic({
 apiKey: ANTHROPIC_API_KEY,
+baseUrl: ANTHROPIC_API_BASE_URL,
 });
 this.mcp = new Client({ name: "mcp-client-cli", version: "1.0.0" });
 }
@@ -15,15 +15,13 @@ Taskmaster uses two primary methods for configuration:
 "provider": "anthropic",
 "modelId": "claude-3-7-sonnet-20250219",
 "maxTokens": 64000,
-"temperature": 0.2,
-"baseUrl": "https://api.anthropic.com/v1"
+"temperature": 0.2
 },
 "research": {
 "provider": "perplexity",
 "modelId": "sonar-pro",
 "maxTokens": 8700,
-"temperature": 0.1,
-"baseUrl": "https://api.perplexity.ai/v1"
+"temperature": 0.1
 },
 "fallback": {
 "provider": "anthropic",
@@ -58,9 +56,8 @@ Taskmaster uses two primary methods for configuration:
 - `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`).
 - `OPENROUTER_API_KEY`: Your OpenRouter API key.
 - `XAI_API_KEY`: Your X-AI API key.
-- **Optional Endpoint Overrides:**
-- **Per-role `baseUrl` in `.taskmasterconfig`:** You can add a `baseUrl` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
-- `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseUrl` for the Azure model role).
+- **Optional Endpoint Overrides (in .taskmasterconfig):**
+- `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key.
 - `OLLAMA_BASE_URL`: Override the default Ollama API URL (Default: `http://localhost:11434/api`).
 
 **Important:** Settings like model ID selections (`main`, `research`, `fallback`), `maxTokens`, `temperature`, `logLevel`, `defaultSubtasks`, `defaultPriority`, and `projectName` are **managed in `.taskmasterconfig`**, not environment variables.
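As a quick illustration of the environment-variable endpoint overrides listed in the hunk above, a hedged `.env` sketch follows; the Azure URL is a placeholder, and the Ollama value simply repeats the documented default:

```
# Optional endpoint overrides via environment variables
AZURE_OPENAI_ENDPOINT="https://your-resource.openai.azure.com/"  # required when using an Azure OpenAI key
OLLAMA_BASE_URL="http://localhost:11434/api"                      # default shown in the docs above
```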
@@ -11,7 +11,6 @@ import {
 } from './utils.js';
 import { setTaskStatusDirect } from '../core/task-master-core.js';
 import { findTasksJsonPath } from '../core/utils/path-utils.js';
-import { TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js';
 
 /**
 * Register the setTaskStatus tool with the MCP server
@@ -28,7 +27,7 @@ export function registerSetTaskStatusTool(server) {
 "Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated to update multiple tasks/subtasks at once."
 ),
 status: z
-.enum(TASK_STATUS_OPTIONS)
+.string()
 .describe(
 "New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'."
 ),
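Several hunks in this comparison import `TASK_STATUS_OPTIONS` and `isValidTaskStatus` from `src/constants/task-status.js`, but that file itself is not part of the diff. A minimal sketch of what such a module would look like, using the status values quoted in the tool description above (the exact contents are an assumption):

```js
// src/constants/task-status.js — hypothetical sketch, not part of this diff
export const TASK_STATUS_OPTIONS = [
	'pending',
	'done',
	'in-progress',
	'review',
	'deferred',
	'cancelled'
];

// Returns true when the given status is one of the allowed values.
export function isValidTaskStatus(status) {
	return TASK_STATUS_OPTIONS.includes(status);
}
```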
package-lock.json (generated, 4 changes)

@@ -1,12 +1,12 @@
 {
 "name": "task-master-ai",
-"version": "0.13.2",
+"version": "0.12",
 "lockfileVersion": 3,
 "requires": true,
 "packages": {
 "": {
 "name": "task-master-ai",
-"version": "0.13.2",
+"version": "0.12",
 "license": "MIT WITH Commons-Clause",
 "dependencies": {
 "@ai-sdk/anthropic": "^1.2.10",
@@ -14,8 +14,7 @@ import {
 getResearchModelId,
 getFallbackProvider,
 getFallbackModelId,
-getParametersForRole,
-getBaseUrlForRole
+getParametersForRole
 } from './config-manager.js';
 import { log, resolveEnvVariable, findProjectRoot } from './utils.js';
 
@@ -285,13 +284,7 @@ async function _unifiedServiceRunner(serviceType, params) {
 'AI service call failed for all configured roles.';
 
 for (const currentRole of sequence) {
-let providerName,
-modelId,
-apiKey,
-roleParams,
-providerFnSet,
-providerApiFn,
-baseUrl;
+let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn;
 
 try {
 log('info', `New AI service call with role: ${currentRole}`);
@@ -332,7 +325,6 @@ async function _unifiedServiceRunner(serviceType, params) {
 
 // Pass effectiveProjectRoot to getParametersForRole
 roleParams = getParametersForRole(currentRole, effectiveProjectRoot);
-baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot);
 
 // 2. Get Provider Function Set
 providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()];
@@ -409,7 +401,6 @@ async function _unifiedServiceRunner(serviceType, params) {
 maxTokens: roleParams.maxTokens,
 temperature: roleParams.temperature,
 messages,
-baseUrl,
 ...(serviceType === 'generateObject' && { schema, objectName }),
 ...restApiParams
 };
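To make the removed plumbing easier to follow: on the left-hand side of this comparison, the unified runner resolved a per-role `baseUrl` alongside the other role parameters and forwarded it to the selected provider function. A condensed sketch of that flow, assembled from the removed lines above (the helper and variable names come from the diff; the final call is illustrative, not a verbatim excerpt):

```js
// Condensed sketch of the left-hand-side flow
const roleParams = getParametersForRole(currentRole, effectiveProjectRoot);
const baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot); // undefined if not configured

const callParams = {
	apiKey,
	modelId,
	maxTokens: roleParams.maxTokens,
	temperature: roleParams.temperature,
	messages,
	baseUrl, // each provider's getClient(apiKey, baseUrl) applies it as baseURL
	...restApiParams
};

const result = await providerApiFn(callParams);
```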
@@ -73,10 +73,6 @@ import {
 getApiKeyStatusReport
 } from './task-manager/models.js';
 import { findProjectRoot } from './utils.js';
-import {
-isValidTaskStatus,
-TASK_STATUS_OPTIONS
-} from '../../src/constants/task-status.js';
 import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
 /**
 * Runs the interactive setup process for model configuration.
@@ -1037,7 +1033,7 @@ function registerCommands(programInstance) {
 )
 .option(
 '-s, --status <status>',
-`New status (one of: ${TASK_STATUS_OPTIONS.join(', ')})`
+'New status (todo, in-progress, review, done)'
 )
 .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
 .action(async (options) => {
@@ -1050,16 +1046,6 @@ function registerCommands(programInstance) {
 process.exit(1);
 }
 
-if (!isValidTaskStatus(status)) {
-console.error(
-chalk.red(
-`Error: Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
-)
-);
-
-process.exit(1);
-}
-
 console.log(
 chalk.blue(`Setting status of task(s) ${taskId} to: ${status}`)
 );
@@ -677,13 +677,6 @@ function getAllProviders() {
 return Object.keys(MODEL_MAP || {});
 }
 
-function getBaseUrlForRole(role, explicitRoot = null) {
-const roleConfig = getModelConfigForRole(role, explicitRoot);
-return roleConfig && typeof roleConfig.baseUrl === 'string'
-? roleConfig.baseUrl
-: undefined;
-}
-
 export {
 // Core config access
 getConfig,
@@ -711,7 +704,6 @@ export {
 getFallbackModelId,
 getFallbackMaxTokens,
 getFallbackTemperature,
-getBaseUrlForRole,
 
 // Global setting getters (No env var overrides)
 getLogLevel,
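Usage of the removed helper was straightforward; a hedged example (the role name is illustrative):

```js
// Returns the configured override for the role, or undefined so callers
// fall back to the provider's standard endpoint.
const mainBaseUrl = getBaseUrlForRole('main');
```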
@@ -8,10 +8,6 @@ import { validateTaskDependencies } from '../dependency-manager.js';
 import { getDebugFlag } from '../config-manager.js';
 import updateSingleTaskStatus from './update-single-task-status.js';
 import generateTaskFiles from './generate-task-files.js';
-import {
-isValidTaskStatus,
-TASK_STATUS_OPTIONS
-} from '../../../src/constants/task-status.js';
 
 /**
 * Set the status of a task
@@ -23,11 +19,6 @@ import {
 */
 async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {
 try {
-if (!isValidTaskStatus(newStatus)) {
-throw new Error(
-`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
-);
-}
 // Determine if we're in MCP mode by checking for mcpLog
 const isMcpMode = !!options?.mcpLog;
 
@@ -1,7 +1,6 @@
 import chalk from 'chalk';
 
 import { log } from '../utils.js';
-import { isValidTaskStatus } from '../../../src/constants/task-status.js';
 
 /**
 * Update the status of a single task
@@ -18,12 +17,6 @@ async function updateSingleTaskStatus(
 data,
 showUi = true
 ) {
-if (!isValidTaskStatus(newStatus)) {
-throw new Error(
-`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
-);
-}
-
 // Check if it's a subtask (e.g., "1.2")
 if (taskIdInput.includes('.')) {
 const [parentId, subtaskId] = taskIdInput
@@ -19,7 +19,6 @@ import {
 import fs from 'fs';
 import { findNextTask, analyzeTaskComplexity } from './task-manager.js';
 import { getProjectName, getDefaultSubtasks } from './config-manager.js';
-import { TASK_STATUS_OPTIONS } from '../../src/constants/task-status.js';
 import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
 
 // Create a color gradient for the banner
@@ -449,7 +448,7 @@ function displayHelp() {
 {
 name: 'set-status',
 args: '--id=<id> --status=<status>',
-desc: `Update task status (${TASK_STATUS_OPTIONS.join(', ')})`
+desc: 'Update task status (done, pending, etc.)'
 },
 {
 name: 'update',
@@ -5,7 +5,7 @@
 * using the Vercel AI SDK.
 */
 import { createAnthropic } from '@ai-sdk/anthropic';
-import { generateText, streamText, generateObject } from 'ai';
+import { generateText, streamText, generateObject, streamObject } from 'ai';
 import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
 
 // TODO: Implement standardized functions for generateText, streamText, generateObject
@@ -17,7 +17,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils is acces
 // Remove the global variable and caching logic
 // let anthropicClient;
 
-function getClient(apiKey, baseUrl) {
+function getClient(apiKey) {
 if (!apiKey) {
 // In a real scenario, this would use the config resolver.
 // Throwing error here if key isn't passed for simplicity.
@@ -30,12 +30,14 @@ function getClient(apiKey, baseUrl) {
 // Create and return a new instance directly with standard version header
 return createAnthropic({
 apiKey: apiKey,
-...(baseUrl && { baseURL: baseUrl }),
+baseURL: 'https://api.anthropic.com/v1',
 // Use standard version header instead of beta
 headers: {
 'anthropic-beta': 'output-128k-2025-02-19'
 }
 });
+// }
+// return anthropicClient;
 }
 
 // --- Standardized Service Function Implementations ---
@@ -49,7 +51,6 @@ function getClient(apiKey, baseUrl) {
 * @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
-* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
 * @returns {Promise<string>} The generated text content.
 * @throws {Error} If the API call fails.
 */
@@ -58,12 +59,11 @@ export async function generateAnthropicText({
 modelId,
 messages,
 maxTokens,
-temperature,
-baseUrl
+temperature
 }) {
 log('debug', `Generating Anthropic text with model: ${modelId}`);
 try {
-const client = getClient(apiKey, baseUrl);
+const client = getClient(apiKey);
 const result = await generateText({
 model: client(modelId),
 messages: messages,
@@ -93,7 +93,6 @@ export async function generateAnthropicText({
 * @param {Array<object>} params.messages - The messages array.
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
-* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
 * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
 * @throws {Error} If the API call fails to initiate the stream.
 */
@@ -102,20 +101,20 @@ export async function streamAnthropicText({
 modelId,
 messages,
 maxTokens,
-temperature,
-baseUrl
+temperature
 }) {
 log('debug', `Streaming Anthropic text with model: ${modelId}`);
 try {
-const client = getClient(apiKey, baseUrl);
+const client = getClient(apiKey);
 
+// --- DEBUG LOGGING --- >>
 log(
 'debug',
 '[streamAnthropicText] Parameters received by streamText:',
 JSON.stringify(
 {
-modelId: modelId,
-messages: messages,
+modelId: modelId, // Log modelId being used
+messages: messages, // Log the messages array
 maxTokens: maxTokens,
 temperature: temperature
 },
@@ -123,19 +122,25 @@ export async function streamAnthropicText({
 2
 )
 );
+// --- << DEBUG LOGGING ---
 
 const stream = await streamText({
 model: client(modelId),
 messages: messages,
 maxTokens: maxTokens,
 temperature: temperature
+// Beta header moved to client initialization
 // TODO: Add other relevant parameters
 });
 
 // *** RETURN THE FULL STREAM OBJECT, NOT JUST stream.textStream ***
 return stream;
 } catch (error) {
-log('error', `Anthropic streamText failed: ${error.message}`, error.stack);
+log(
+'error',
+`Anthropic streamText failed: ${error.message}`,
+error.stack // Log stack trace for more details
+);
 throw error;
 }
 }
@@ -155,7 +160,6 @@ export async function streamAnthropicText({
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
 * @param {number} [params.maxRetries] - Max retries for validation/generation.
-* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If generation or validation fails.
 */
@@ -167,22 +171,24 @@ export async function generateAnthropicObject({
 objectName = 'generated_object',
 maxTokens,
 temperature,
-maxRetries = 3,
-baseUrl
+maxRetries = 3
 }) {
 log(
 'debug',
 `Generating Anthropic object ('${objectName}') with model: ${modelId}`
 );
 try {
-const client = getClient(apiKey, baseUrl);
+const client = getClient(apiKey);
+
+// Log basic debug info
 log(
 'debug',
 `Using maxTokens: ${maxTokens}, temperature: ${temperature}, model: ${modelId}`
 );
+
 const result = await generateObject({
 model: client(modelId),
-mode: 'tool',
+mode: 'tool', // Anthropic generally uses 'tool' mode for structured output
 schema: schema,
 messages: messages,
 tool: {
@@ -193,12 +199,14 @@ export async function generateAnthropicObject({
 temperature: temperature,
 maxRetries: maxRetries
 });
+
 log(
 'debug',
 `Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
 );
 return result.object;
 } catch (error) {
+// Simple error logging
 log(
 'error',
 `Anthropic generateObject ('${objectName}') failed: ${error.message}`
@@ -12,16 +12,6 @@ import { log } from '../../scripts/modules/utils.js'; // Import logging utility
 const DEFAULT_MODEL = 'gemini-2.0-pro'; // Or a suitable default
 const DEFAULT_TEMPERATURE = 0.2; // Or a suitable default
 
-function getClient(apiKey, baseUrl) {
-if (!apiKey) {
-throw new Error('Google API key is required.');
-}
-return createGoogleGenerativeAI({
-apiKey: apiKey,
-...(baseUrl && { baseURL: baseUrl })
-});
-}
-
 /**
 * Generates text using a Google AI model.
 *
@@ -39,8 +29,7 @@ async function generateGoogleText({
 modelId = DEFAULT_MODEL,
 temperature = DEFAULT_TEMPERATURE,
 messages,
-maxTokens,
-baseUrl
+maxTokens // Note: Vercel SDK might handle this differently, needs verification
 }) {
 if (!apiKey) {
 throw new Error('Google API key is required.');
@@ -48,21 +37,28 @@ async function generateGoogleText({
 log('info', `Generating text with Google model: ${modelId}`);
 
 try {
-const googleProvider = getClient(apiKey, baseUrl);
-const model = googleProvider(modelId);
+// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
+const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
+// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
+const model = googleProvider(modelId); // Correct model retrieval
+
+// Construct payload suitable for Vercel SDK's generateText
+// Note: The exact structure might depend on how messages are passed
 const result = await generateText({
-model,
-messages,
+model, // Pass the model instance
+messages, // Pass the messages array directly
 temperature,
-maxOutputTokens: maxTokens
+maxOutputTokens: maxTokens // Map to correct Vercel SDK param if available
 });
-return result.text;
+// Assuming result structure provides text directly or within a property
+return result.text; // Adjust based on actual SDK response
 } catch (error) {
 log(
 'error',
 `Error generating text with Google (${modelId}): ${error.message}`
 );
-throw error;
+throw error; // Re-throw for unified service handler
 }
 }
 
@@ -83,8 +79,7 @@ async function streamGoogleText({
 modelId = DEFAULT_MODEL,
 temperature = DEFAULT_TEMPERATURE,
 messages,
-maxTokens,
-baseUrl
+maxTokens
 }) {
 if (!apiKey) {
 throw new Error('Google API key is required.');
@@ -92,15 +87,19 @@ async function streamGoogleText({
 log('info', `Streaming text with Google model: ${modelId}`);
 
 try {
-const googleProvider = getClient(apiKey, baseUrl);
-const model = googleProvider(modelId);
+// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
+const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
+// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
+const model = googleProvider(modelId); // Correct model retrieval
+
 const stream = await streamText({
-model,
+model, // Pass the model instance
 messages,
 temperature,
 maxOutputTokens: maxTokens
 });
-return stream;
+return stream; // Return the stream directly
 } catch (error) {
 log(
 'error',
@@ -131,8 +130,7 @@ async function generateGoogleObject({
 messages,
 schema,
 objectName, // Note: Vercel SDK might use this differently or not at all
-maxTokens,
-baseUrl
+maxTokens
 }) {
 if (!apiKey) {
 throw new Error('Google API key is required.');
@@ -140,16 +138,23 @@ async function generateGoogleObject({
 log('info', `Generating object with Google model: ${modelId}`);
 
 try {
-const googleProvider = getClient(apiKey, baseUrl);
-const model = googleProvider(modelId);
+// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
+const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
+// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
+const model = googleProvider(modelId); // Correct model retrieval
+
 const { object } = await generateObject({
-model,
+model, // Pass the model instance
 schema,
 messages,
 temperature,
 maxOutputTokens: maxTokens
+// Note: 'objectName' or 'mode' might not be directly applicable here
+// depending on how `@ai-sdk/google` handles `generateObject`.
+// Check SDK docs if specific tool calling/JSON mode needs explicit setup.
 });
-return object;
+return object; // Return the parsed object
 } catch (error) {
 log(
 'error',
@@ -1,26 +1,16 @@
-import { createOpenAI } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
-import { generateObject } from 'ai'; // Import necessary functions from 'ai'
+import { createOpenAI, openai } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
+import { generateText, streamText, generateObject } from 'ai'; // Import necessary functions from 'ai'
 import { log } from '../../scripts/modules/utils.js';
 
-function getClient(apiKey, baseUrl) {
-if (!apiKey) {
-throw new Error('OpenAI API key is required.');
-}
-return createOpenAI({
-apiKey: apiKey,
-...(baseUrl && { baseURL: baseUrl })
-});
-}
-
 /**
 * Generates text using OpenAI models via Vercel AI SDK.
 *
-* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
+* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
 * @returns {Promise<string>} The generated text content.
 * @throws {Error} If API call fails.
 */
 export async function generateOpenAIText(params) {
-const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
+const { apiKey, modelId, messages, maxTokens, temperature } = params;
 log('debug', `generateOpenAIText called with model: ${modelId}`);
 
 if (!apiKey) {
@@ -33,15 +23,18 @@ export async function generateOpenAIText(params) {
 throw new Error('Invalid or empty messages array provided for OpenAI.');
 }
 
-const openaiClient = getClient(apiKey, baseUrl);
+const openaiClient = createOpenAI({ apiKey });
 
 try {
 const result = await openaiClient.chat(messages, {
+// Updated: Use openaiClient.chat directly
 model: modelId,
 max_tokens: maxTokens,
 temperature
 });
 
+// Adjust based on actual Vercel SDK response structure for openaiClient.chat
+// This might need refinement based on testing the SDK's output.
 const textContent = result?.choices?.[0]?.message?.content?.trim();
 
 if (!textContent) {
@@ -72,12 +65,12 @@ export async function generateOpenAIText(params) {
 /**
 * Streams text using OpenAI models via Vercel AI SDK.
 *
-* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
+* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
 * @returns {Promise<ReadableStream>} A readable stream of text deltas.
 * @throws {Error} If API call fails.
 */
 export async function streamOpenAIText(params) {
-const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
+const { apiKey, modelId, messages, maxTokens, temperature } = params;
 log('debug', `streamOpenAIText called with model: ${modelId}`);
 
 if (!apiKey) {
@@ -92,10 +85,12 @@ export async function streamOpenAIText(params) {
 );
 }
 
-const openaiClient = getClient(apiKey, baseUrl);
+const openaiClient = createOpenAI({ apiKey });
 
 try {
+// Use the streamText function from Vercel AI SDK core
 const stream = await openaiClient.chat.stream(messages, {
+// Updated: Use openaiClient.chat.stream
 model: modelId,
 max_tokens: maxTokens,
 temperature
@@ -105,6 +100,7 @@ export async function streamOpenAIText(params) {
 'debug',
 `OpenAI streamText initiated successfully for model: ${modelId}`
 );
+// The Vercel SDK's streamText should directly return the stream object
 return stream;
 } catch (error) {
 log(
@@ -121,7 +117,7 @@ export async function streamOpenAIText(params) {
 /**
 * Generates structured objects using OpenAI models via Vercel AI SDK.
 *
-* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature, baseUrl.
+* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If API call fails or object generation fails.
 */
@@ -133,8 +129,7 @@ export async function generateOpenAIObject(params) {
 schema,
 objectName,
 maxTokens,
-temperature,
-baseUrl
+temperature
 } = params;
 log(
 'debug',
@@ -150,9 +145,10 @@ export async function generateOpenAIObject(params) {
 if (!objectName)
 throw new Error('Object name is required for OpenAI object generation.');
 
-const openaiClient = getClient(apiKey, baseUrl);
+const openaiClient = createOpenAI({ apiKey });
 
 try {
+// Use the imported generateObject function from 'ai' package
 const result = await generateObject({
 model: openaiClient(modelId),
 schema: schema,
@@ -2,14 +2,6 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
 import { generateText, streamText, generateObject } from 'ai';
 import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in scripts/modules
 
-function getClient(apiKey, baseUrl) {
-if (!apiKey) throw new Error('OpenRouter API key is required.');
-return createOpenRouter({
-apiKey,
-...(baseUrl && { baseURL: baseUrl })
-});
-}
-
 /**
 * Generates text using an OpenRouter chat model.
 *
@@ -19,7 +11,6 @@ function getClient(apiKey, baseUrl) {
 * @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
 * @param {number} [params.maxTokens] - Maximum tokens to generate.
 * @param {number} [params.temperature] - Sampling temperature.
-* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
 * @returns {Promise<string>} The generated text content.
 * @throws {Error} If the API call fails.
 */
@@ -29,7 +20,6 @@ async function generateOpenRouterText({
 messages,
 maxTokens,
 temperature,
-baseUrl,
 ...rest // Capture any other Vercel AI SDK compatible parameters
 }) {
 if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -38,7 +28,7 @@ async function generateOpenRouterText({
 throw new Error('Messages array cannot be empty.');
 
 try {
-const openrouter = getClient(apiKey, baseUrl);
+const openrouter = createOpenRouter({ apiKey });
 const model = openrouter.chat(modelId); // Assuming chat model
 
 const { text } = await generateText({
@@ -68,7 +58,6 @@ async function generateOpenRouterText({
 * @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
 * @param {number} [params.maxTokens] - Maximum tokens to generate.
 * @param {number} [params.temperature] - Sampling temperature.
-* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
 * @returns {Promise<ReadableStream<string>>} A readable stream of text deltas.
 * @throws {Error} If the API call fails.
 */
@@ -78,7 +67,6 @@ async function streamOpenRouterText({
 messages,
 maxTokens,
 temperature,
-baseUrl,
 ...rest
 }) {
 if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -87,7 +75,7 @@ async function streamOpenRouterText({
 throw new Error('Messages array cannot be empty.');
 
 try {
-const openrouter = getClient(apiKey, baseUrl);
+const openrouter = createOpenRouter({ apiKey });
 const model = openrouter.chat(modelId);
 
 // Directly return the stream from the Vercel AI SDK function
@@ -120,7 +108,6 @@ async function streamOpenRouterText({
 * @param {number} [params.maxRetries=3] - Max retries for object generation.
 * @param {number} [params.maxTokens] - Maximum tokens.
 * @param {number} [params.temperature] - Temperature.
-* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If the API call fails or validation fails.
 */
@@ -133,7 +120,6 @@ async function generateOpenRouterObject({
 maxRetries = 3,
 maxTokens,
 temperature,
-baseUrl,
 ...rest
 }) {
 if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -143,7 +129,7 @@ async function generateOpenRouterObject({
 throw new Error('Messages array cannot be empty.');
 
 try {
-const openrouter = getClient(apiKey, baseUrl);
+const openrouter = createOpenRouter({ apiKey });
 const model = openrouter.chat(modelId);
 
 const { object } = await generateObject({
@@ -10,13 +10,13 @@ import { log } from '../../scripts/modules/utils.js';
 
 // --- Client Instantiation ---
 // Similar to Anthropic, this expects the resolved API key to be passed in.
-function getClient(apiKey, baseUrl) {
+function getClient(apiKey) {
 if (!apiKey) {
 throw new Error('Perplexity API key is required.');
 }
+// Create and return a new instance directly
 return createPerplexity({
-apiKey: apiKey,
-...(baseUrl && { baseURL: baseUrl })
+apiKey: apiKey
 });
 }
 
@@ -31,7 +31,6 @@ function getClient(apiKey, baseUrl) {
 * @param {Array<object>} params.messages - The messages array.
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
-* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
 * @returns {Promise<string>} The generated text content.
 * @throws {Error} If the API call fails.
 */
@@ -40,12 +39,11 @@ export async function generatePerplexityText({
 modelId,
 messages,
 maxTokens,
-temperature,
-baseUrl
+temperature
 }) {
 log('debug', `Generating Perplexity text with model: ${modelId}`);
 try {
-const client = getClient(apiKey, baseUrl);
+const client = getClient(apiKey);
 const result = await generateText({
 model: client(modelId),
 messages: messages,
@@ -72,7 +70,6 @@ export async function generatePerplexityText({
 * @param {Array<object>} params.messages - The messages array.
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
-* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
 * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
 * @throws {Error} If the API call fails to initiate the stream.
 */
@@ -81,12 +78,11 @@ export async function streamPerplexityText({
 modelId,
 messages,
 maxTokens,
-temperature,
-baseUrl
+temperature
 }) {
 log('debug', `Streaming Perplexity text with model: ${modelId}`);
 try {
-const client = getClient(apiKey, baseUrl);
+const client = getClient(apiKey);
 const stream = await streamText({
 model: client(modelId),
 messages: messages,
@@ -116,7 +112,6 @@ export async function streamPerplexityText({
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
 * @param {number} [params.maxRetries] - Max retries for validation/generation.
-* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If generation or validation fails or is unsupported.
 */
@@ -128,8 +123,7 @@ export async function generatePerplexityObject({
 objectName = 'generated_object',
 maxTokens,
 temperature,
-maxRetries = 1,
-baseUrl
+maxRetries = 1 // Lower retries as support might be limited
 }) {
 log(
 'debug',
@@ -140,7 +134,8 @@ export async function generatePerplexityObject({
 'generateObject support for Perplexity might be limited or experimental.'
 );
 try {
-const client = getClient(apiKey, baseUrl);
+const client = getClient(apiKey);
+// Attempt using generateObject, but be prepared for potential issues
 const result = await generateObject({
 model: client(modelId),
 schema: schema,
@@ -9,13 +9,14 @@ import { generateText, streamText, generateObject } from 'ai'; // Only import wh
|
|||||||
import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
|
import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
|
||||||
|
|
||||||
// --- Client Instantiation ---
|
// --- Client Instantiation ---
|
||||||
function getClient(apiKey, baseUrl) {
|
function getClient(apiKey) {
|
||||||
if (!apiKey) {
|
if (!apiKey) {
|
||||||
throw new Error('xAI API key is required.');
|
throw new Error('xAI API key is required.');
|
||||||
}
|
}
|
||||||
|
// Create and return a new instance directly
|
||||||
return createXai({
|
return createXai({
|
||||||
apiKey: apiKey,
|
apiKey: apiKey
|
||||||
...(baseUrl && { baseURL: baseUrl })
|
// Add baseURL or other options if needed later
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -30,7 +31,6 @@ function getClient(apiKey, baseUrl) {
|
|||||||
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
|
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
|
||||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||||
* @param {number} [params.temperature] - Temperature for generation.
|
* @param {number} [params.temperature] - Temperature for generation.
|
||||||
* @param {string} [params.baseUrl] - The base URL for the xAI API.
|
|
||||||
* @returns {Promise<string>} The generated text content.
|
* @returns {Promise<string>} The generated text content.
|
||||||
* @throws {Error} If the API call fails.
|
* @throws {Error} If the API call fails.
|
||||||
*/
|
*/
|
||||||
@@ -39,14 +39,13 @@ export async function generateXaiText({
 	modelId,
 	messages,
 	maxTokens,
-	temperature,
-	baseUrl
+	temperature
 }) {
 	log('debug', `Generating xAI text with model: ${modelId}`);
 	try {
-		const client = getClient(apiKey, baseUrl);
+		const client = getClient(apiKey);
 		const result = await generateText({
-			model: client(modelId),
+			model: client(modelId), // Correct model invocation
 			messages: messages,
 			maxTokens: maxTokens,
 			temperature: temperature
@@ -71,7 +70,6 @@ export async function generateXaiText({
 * @param {Array<object>} params.messages - The messages array.
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
-* @param {string} [params.baseUrl] - The base URL for the xAI API.
 * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
 * @throws {Error} If the API call fails to initiate the stream.
 */
@@ -80,19 +78,18 @@ export async function streamXaiText({
 	modelId,
 	messages,
 	maxTokens,
-	temperature,
-	baseUrl
+	temperature
 }) {
 	log('debug', `Streaming xAI text with model: ${modelId}`);
 	try {
-		const client = getClient(apiKey, baseUrl);
+		const client = getClient(apiKey);
 		const stream = await streamText({
-			model: client(modelId),
+			model: client(modelId), // Correct model invocation
 			messages: messages,
 			maxTokens: maxTokens,
 			temperature: temperature
 		});
-		return stream;
+		return stream; // Return the full stream object
 	} catch (error) {
 		log('error', `xAI streamText failed: ${error.message}`, error.stack);
 		throw error;
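Because streamXaiText returns the full stream object from the Vercel AI SDK, a caller would typically iterate its textStream. A minimal sketch, assuming the import path and an example model id that are not taken from this diff:

import { streamXaiText } from './src/ai-providers/xai.js'; // assumed path

const stream = await streamXaiText({
	apiKey: process.env.XAI_API_KEY,
	modelId: 'grok-beta', // example model id, not from this diff
	messages: [{ role: 'user', content: 'Summarize the pending tasks.' }],
	maxTokens: 512,
	temperature: 0.2
});

// The AI SDK stream object exposes an async-iterable text stream.
for await (const chunk of stream.textStream) {
	process.stdout.write(chunk);
}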
@@ -113,7 +110,6 @@ export async function streamXaiText({
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
 * @param {number} [params.maxRetries] - Max retries for validation/generation.
-* @param {string} [params.baseUrl] - The base URL for the xAI API.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If generation or validation fails.
 */
@@ -125,17 +121,16 @@ export async function generateXaiObject({
 	objectName = 'generated_xai_object',
 	maxTokens,
 	temperature,
-	maxRetries = 3,
-	baseUrl
+	maxRetries = 3
 }) {
 	log(
-		'warn',
+		'warn', // Log warning as this is likely unsupported
 		`Attempting to generate xAI object ('${objectName}') with model: ${modelId}. This may not be supported by the provider.`
 	);
 	try {
-		const client = getClient(apiKey, baseUrl);
+		const client = getClient(apiKey);
 		const result = await generateObject({
-			model: client(modelId),
+			model: client(modelId), // Correct model invocation
 			// Note: mode might need adjustment if xAI ever supports object generation differently
 			mode: 'tool',
 			schema: schema,
@@ -158,6 +153,6 @@ export async function generateXaiObject({
 			'error',
 			`xAI generateObject ('${objectName}') failed: ${error.message}. (Likely unsupported by provider)`
 		);
-		throw error;
+		throw error; // Re-throw the error
 	}
 }
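The object-generation path wraps the AI SDK's generateObject with mode: 'tool', which expects a schema (typically zod), and the module itself warns that the provider may not support it. A hedged usage sketch; the import path, model id, and messages field are assumptions:

import { z } from 'zod';
import { generateXaiObject } from './src/ai-providers/xai.js'; // assumed path

const subtaskSchema = z.object({
	title: z.string(),
	description: z.string()
});

try {
	const subtask = await generateXaiObject({
		apiKey: process.env.XAI_API_KEY,
		modelId: 'grok-beta', // example model id, not from this diff
		schema: subtaskSchema,
		objectName: 'generated_xai_object',
		messages: [{ role: 'user', content: 'Draft one subtask for setting up CI.' }], // assumed field
		maxRetries: 3
	});
	console.log(subtask);
} catch (error) {
	// As the module warns, structured output may simply be unsupported by the provider.
	console.error('Object generation failed:', error.message);
}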
@@ -1,32 +0,0 @@
-/**
- * @typedef {'pending' | 'done' | 'in-progress' | 'review' | 'deferred' | 'cancelled'} TaskStatus
- */
-
-/**
- * Task status options list
- * @type {TaskStatus[]}
- * @description Defines possible task statuses:
- * - pending: Task waiting to start
- * - done: Task completed
- * - in-progress: Task in progress
- * - review: Task completed and waiting for review
- * - deferred: Task postponed or paused
- * - cancelled: Task cancelled and will not be completed
- */
-export const TASK_STATUS_OPTIONS = [
-	'pending',
-	'done',
-	'in-progress',
-	'review',
-	'deferred',
-	'cancelled'
-];
-
-/**
- * Check if a given status is a valid task status
- * @param {string} status - The status to check
- * @returns {boolean} True if the status is valid, false otherwise
- */
-export function isValidTaskStatus(status) {
-	return TASK_STATUS_OPTIONS.includes(status);
-}
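The constants module deleted above centralizes the allowed statuses and a validation helper, and the test hunks further down show how callers guard status updates with it. A small usage sketch, reusing the error format from the removed guard; the import path matches the import shown in the test diff below:

import {
	isValidTaskStatus,
	TASK_STATUS_OPTIONS
} from './src/constants/task-status.js';

function assertValidStatus(status) {
	// Mirrors the guard the removed test exercises: reject anything outside the list.
	if (!isValidTaskStatus(status)) {
		throw new Error(
			`Error: Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
		);
	}
}

assertValidStatus('in-progress'); // ok
assertValidStatus('Done'); // throws: status values are lowercase, so 'Done' is rejected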
@@ -8,7 +8,6 @@ const mockGetResearchModelId = jest.fn();
 const mockGetFallbackProvider = jest.fn();
 const mockGetFallbackModelId = jest.fn();
 const mockGetParametersForRole = jest.fn();
-const mockGetBaseUrlForRole = jest.fn();

 jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
 	getMainProvider: mockGetMainProvider,
@@ -17,8 +16,7 @@ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
 	getResearchModelId: mockGetResearchModelId,
 	getFallbackProvider: mockGetFallbackProvider,
 	getFallbackModelId: mockGetFallbackModelId,
-	getParametersForRole: mockGetParametersForRole,
-	getBaseUrlForRole: mockGetBaseUrlForRole
+	getParametersForRole: mockGetParametersForRole
 }));

 // Mock AI Provider Modules
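The removed getBaseUrlForRole mock implies a config-manager helper that resolves an endpoint override per model role. A minimal sketch of what such a lookup could look like; the config shape and field names here are assumptions, not taken from this diff:

import fs from 'fs';

// Hypothetical config shape: { models: { main: { baseUrl }, research: { baseUrl }, fallback: { baseUrl } } }
function getBaseUrlForRole(role, configPath) {
	try {
		const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
		return config?.models?.[role]?.baseUrl; // undefined means "no override"
	} catch {
		return undefined; // a missing or unreadable config simply disables the override
	}
}

// Example: getBaseUrlForRole('research', 'path/to/config.json')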
@@ -199,12 +199,6 @@ const testSetTaskStatus = (tasksData, taskIdInput, newStatus) => {

 // Simplified version of updateSingleTaskStatus for testing
 const testUpdateSingleTaskStatus = (tasksData, taskIdInput, newStatus) => {
-	if (!isValidTaskStatus(newStatus)) {
-		throw new Error(
-			`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
-		);
-	}
-
 	// Check if it's a subtask (e.g., "1.2")
 	if (taskIdInput.includes('.')) {
 		const [parentId, subtaskId] = taskIdInput
@@ -335,10 +329,6 @@ const testAddTask = (
 import * as taskManager from '../../scripts/modules/task-manager.js';
 import { sampleClaudeResponse } from '../fixtures/sample-claude-response.js';
 import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';
-import {
-	isValidTaskStatus,
-	TASK_STATUS_OPTIONS
-} from '../../src/constants/task-status.js';

 // Destructure the required functions for convenience
 const { findNextTask, generateTaskFiles, clearSubtasks, updateTaskById } =
@@ -1175,16 +1165,6 @@ describe('Task Manager Module', () => {
 			expect(testTasksData.tasks[1].status).toBe('done');
 		});

-		test('should throw error for invalid status', async () => {
-			// Arrange
-			const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
-
-			// Assert
-			expect(() =>
-				testUpdateSingleTaskStatus(testTasksData, '2', 'Done')
-			).toThrow(/Error: Invalid status value: Done./);
-		});
-
 		test('should update subtask status', async () => {
 			// Arrange
 			const testTasksData = JSON.parse(JSON.stringify(sampleTasks));