Compare commits


3 Commits

Author SHA1 Message Date
Ralph Khreish e8f73d1bea Merge remote-tracking branch 'origin/next' into ThomasMldr/main 2025-05-16 11:43:03 +02:00
Thomas Mulder f9f3a24568 add test + documentation 2025-04-22 14:13:20 +02:00
Thomas Mulder b1f3796ec7 Add optional ANTHROPIC_API_BASE_URL 2025-04-22 13:41:59 +02:00
39 changed files with 277 additions and 773 deletions

View File

@@ -1,5 +0,0 @@
---
'task-master-ai': minor
---
.taskmasterconfig now supports a baseUrl field per model role (main, research, fallback), allowing endpoint overrides for any provider.
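
For reference, the per-role shape this changeset refers to is the one shown in the configuration docs hunk later in this compare; a main role with an endpoint override would look roughly like the sketch below (wrapper keys and other roles omitted, values illustrative):

  "main": {
    "provider": "anthropic",
    "modelId": "claude-3-7-sonnet-20250219",
    "maxTokens": 64000,
    "temperature": 0.2,
    "baseUrl": "https://api.anthropic.com/v1"
  }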

View File

@@ -1,5 +0,0 @@
---
'task-master-ai': patch
---
Fix the error handling of task status settings

View File

@@ -1,7 +0,0 @@
---
'task-master-ai': patch
---
Remove caching layer from MCP direct functions for task listing, next task, and complexity report
- Fixes issues users were having where they were getting stale data

View File

@@ -1,5 +0,0 @@
---
'task-master-ai': minor
---
Display task complexity scores in task lists, next task, and task details views.

View File

@@ -1,9 +1,29 @@
-# API Keys (Required for using in any role i.e. main/research/fallback -- see `task-master models`)
-ANTHROPIC_API_KEY=YOUR_ANTHROPIC_KEY_HERE
-PERPLEXITY_API_KEY=YOUR_PERPLEXITY_KEY_HERE
-OPENAI_API_KEY=YOUR_OPENAI_KEY_HERE
-GOOGLE_API_KEY=YOUR_GOOGLE_KEY_HERE
-MISTRAL_API_KEY=YOUR_MISTRAL_KEY_HERE
-OPENROUTER_API_KEY=YOUR_OPENROUTER_KEY_HERE
-XAI_API_KEY=YOUR_XAI_KEY_HERE
-AZURE_OPENAI_API_KEY=YOUR_AZURE_KEY_HERE
+# API Keys (Required)
+ANTHROPIC_API_KEY="your_anthropic_api_key_here" # Format: sk-ant-api03-...
+PERPLEXITY_API_KEY="your_perplexity_api_key_here" # Format: pplx-...
+OPENAI_API_KEY="your_openai_api_key_here" # Format: sk-...
+GOOGLE_API_KEY="your_google_api_key_here" # Format: AIza...
+MISTRAL_API_KEY="your_mistral_api_key_here" # Format: ...
+OPENROUTER_API_KEY="your_openrouter_api_key_here" # Format: sk-or-...
+XAI_API_KEY="your_xai_api_key_here" # Format: ...
+AZURE_OPENAI_API_KEY="your_azure_key_here" # Format: ...
+
+# API Base URLs (Optional)
+ANTHROPIC_API_BASE_URL="optional_base_url_here" # Optional custom base URL for Anthropic API
+
+# Model Configuration
+MODEL="claude-3-7-sonnet-20250219" # Recommended models: claude-3-7-sonnet-20250219, claude-3-opus-20240229
+PERPLEXITY_MODEL="sonar-pro" # Perplexity model for research-backed subtasks
+MAX_TOKENS="64000" # Maximum tokens for model responses
+TEMPERATURE="0.2" # Temperature for model responses (0.0-1.0)
+
+# Logging Configuration
+DEBUG="false" # Enable debug logging (true/false)
+LOG_LEVEL="info" # Log level (debug, info, warn, error)
+
+# Task Generation Settings
+DEFAULT_SUBTASKS="5" # Default number of subtasks when expanding
+DEFAULT_PRIORITY="medium" # Default priority for generated tasks (high, medium, low)
+
+# Project Metadata (Optional)
+PROJECT_NAME="Your Project Name" # Override default project name in tasks.json

View File

@@ -30,6 +30,7 @@ MCP (Model Control Protocol) provides the easiest way to get started with Task M
       "args": ["-y", "--package=task-master-ai", "task-master-ai"],
       "env": {
         "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
+        "ANTHROPIC_API_BASE_URL": "YOUR_CUSTOM_BASE_URL_HERE (optional)",
         "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
         "OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE",
         "GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE",

View File

@@ -3784,6 +3784,7 @@ In this tutorial, you'll learn how to build a LLM-powered chatbot client that co
 if (!ANTHROPIC_API_KEY) {
   throw new Error("ANTHROPIC_API_KEY is not set");
 }
+const ANTHROPIC_API_BASE_URL = process.env.ANTHROPIC_API_BASE_URL;

 class MCPClient {
   private mcp: Client;
@@ -3794,6 +3795,7 @@ In this tutorial, you'll learn how to build a LLM-powered chatbot client that co
   constructor() {
     this.anthropic = new Anthropic({
       apiKey: ANTHROPIC_API_KEY,
+      baseUrl: ANTHROPIC_API_BASE_URL,
     });
     this.mcp = new Client({ name: "mcp-client-cli", version: "1.0.0" });
   }

View File

@@ -15,15 +15,13 @@ Taskmaster uses two primary methods for configuration:
       "provider": "anthropic",
       "modelId": "claude-3-7-sonnet-20250219",
       "maxTokens": 64000,
-      "temperature": 0.2,
-      "baseUrl": "https://api.anthropic.com/v1"
+      "temperature": 0.2
     },
     "research": {
       "provider": "perplexity",
       "modelId": "sonar-pro",
       "maxTokens": 8700,
-      "temperature": 0.1,
-      "baseUrl": "https://api.perplexity.ai/v1"
+      "temperature": 0.1
     },
     "fallback": {
       "provider": "anthropic",
@@ -58,9 +56,8 @@ Taskmaster uses two primary methods for configuration:
 - `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`).
 - `OPENROUTER_API_KEY`: Your OpenRouter API key.
 - `XAI_API_KEY`: Your X-AI API key.
-- **Optional Endpoint Overrides:**
-  - **Per-role `baseUrl` in `.taskmasterconfig`:** You can add a `baseUrl` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
-  - `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseUrl` for the Azure model role).
+- **Optional Endpoint Overrides (in .taskmasterconfig):**
+  - `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key.
   - `OLLAMA_BASE_URL`: Override the default Ollama API URL (Default: `http://localhost:11434/api`).

 **Important:** Settings like model ID selections (`main`, `research`, `fallback`), `maxTokens`, `temperature`, `logLevel`, `defaultSubtasks`, `defaultPriority`, and `projectName` are **managed in `.taskmasterconfig`**, not environment variables.

View File

@@ -8,6 +8,7 @@ import {
enableSilentMode, enableSilentMode,
disableSilentMode disableSilentMode
} from '../../../../scripts/modules/utils.js'; } from '../../../../scripts/modules/utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';
/** /**
* Direct function wrapper for displaying the complexity report with error handling and caching. * Direct function wrapper for displaying the complexity report with error handling and caching.
@@ -85,20 +86,30 @@ export async function complexityReportDirect(args, log) {
// Use the caching utility // Use the caching utility
try { try {
const result = await coreActionFn(); const result = await getCachedOrExecute({
log.info('complexityReportDirect completed'); cacheKey,
return result; actionFn: coreActionFn,
log
});
log.info(
`complexityReportDirect completed. From cache: ${result.fromCache}`
);
return result; // Returns { success, data/error, fromCache }
} catch (error) { } catch (error) {
// Catch unexpected errors from getCachedOrExecute itself
// Ensure silent mode is disabled // Ensure silent mode is disabled
disableSilentMode(); disableSilentMode();
log.error(`Unexpected error during complexityReport: ${error.message}`); log.error(
`Unexpected error during getCachedOrExecute for complexityReport: ${error.message}`
);
return { return {
success: false, success: false,
error: { error: {
code: 'UNEXPECTED_ERROR', code: 'UNEXPECTED_ERROR',
message: error.message message: error.message
} },
fromCache: false
}; };
} }
} catch (error) { } catch (error) {
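
The direct-function hunks in this compare call getCachedOrExecute from the MCP tools/utils.js module, whose body is not shown here. Based solely on the call sites (an object with cacheKey, actionFn, and log, returning the action result plus a fromCache flag), a minimal sketch could look like the following; the Map-based store and log messages are assumptions, not the actual implementation:

  // Hypothetical sketch of getCachedOrExecute, inferred from its call sites in this diff.
  const cache = new Map();

  export async function getCachedOrExecute({ cacheKey, actionFn, log }) {
    // Serve a previously stored result for this key, if any
    if (cache.has(cacheKey)) {
      log.info(`Cache hit for ${cacheKey}`);
      return { ...cache.get(cacheKey), fromCache: true };
    }

    // Cache miss: run the core action and store successful results for next time
    log.info(`Cache miss for ${cacheKey}; executing action`);
    const result = await actionFn();
    if (result.success) {
      cache.set(cacheKey, result);
    }
    return { ...result, fromCache: false };
  }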

View File

@@ -4,6 +4,7 @@
*/ */
import { listTasks } from '../../../../scripts/modules/task-manager.js'; import { listTasks } from '../../../../scripts/modules/task-manager.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import { import {
enableSilentMode, enableSilentMode,
disableSilentMode disableSilentMode
@@ -18,7 +19,7 @@ import {
*/ */
export async function listTasksDirect(args, log) { export async function listTasksDirect(args, log) {
// Destructure the explicit tasksJsonPath from args // Destructure the explicit tasksJsonPath from args
const { tasksJsonPath, reportPath, status, withSubtasks } = args; const { tasksJsonPath, status, withSubtasks } = args;
if (!tasksJsonPath) { if (!tasksJsonPath) {
log.error('listTasksDirect called without tasksJsonPath'); log.error('listTasksDirect called without tasksJsonPath');
@@ -35,6 +36,7 @@ export async function listTasksDirect(args, log) {
// Use the explicit tasksJsonPath for cache key // Use the explicit tasksJsonPath for cache key
const statusFilter = status || 'all'; const statusFilter = status || 'all';
const withSubtasksFilter = withSubtasks || false; const withSubtasksFilter = withSubtasks || false;
const cacheKey = `listTasks:${tasksJsonPath}:${statusFilter}:${withSubtasksFilter}`;
// Define the action function to be executed on cache miss // Define the action function to be executed on cache miss
const coreListTasksAction = async () => { const coreListTasksAction = async () => {
@@ -49,7 +51,6 @@ export async function listTasksDirect(args, log) {
const resultData = listTasks( const resultData = listTasks(
tasksJsonPath, tasksJsonPath,
statusFilter, statusFilter,
reportPath,
withSubtasksFilter, withSubtasksFilter,
'json' 'json'
); );
@@ -64,7 +65,6 @@ export async function listTasksDirect(args, log) {
} }
}; };
} }
log.info( log.info(
`Core listTasks function retrieved ${resultData.tasks.length} tasks` `Core listTasks function retrieved ${resultData.tasks.length} tasks`
); );
@@ -88,19 +88,25 @@ export async function listTasksDirect(args, log) {
} }
}; };
// Use the caching utility
try { try {
const result = await coreListTasksAction(); const result = await getCachedOrExecute({
log.info('listTasksDirect completed'); cacheKey,
return result; actionFn: coreListTasksAction,
log
});
log.info(`listTasksDirect completed. From cache: ${result.fromCache}`);
return result; // Returns { success, data/error, fromCache }
} catch (error) { } catch (error) {
log.error(`Unexpected error during listTasks: ${error.message}`); // Catch unexpected errors from getCachedOrExecute itself (though unlikely)
log.error(
`Unexpected error during getCachedOrExecute for listTasks: ${error.message}`
);
console.error(error.stack); console.error(error.stack);
return { return {
success: false, success: false,
error: { error: { code: 'CACHE_UTIL_ERROR', message: error.message },
code: 'UNEXPECTED_ERROR', fromCache: false
message: error.message
}
}; };
} }
} }

View File

@@ -4,10 +4,8 @@
*/ */
import { findNextTask } from '../../../../scripts/modules/task-manager.js'; import { findNextTask } from '../../../../scripts/modules/task-manager.js';
import { import { readJSON } from '../../../../scripts/modules/utils.js';
readJSON, import { getCachedOrExecute } from '../../tools/utils.js';
readComplexityReport
} from '../../../../scripts/modules/utils.js';
import { import {
enableSilentMode, enableSilentMode,
disableSilentMode disableSilentMode
@@ -23,7 +21,7 @@ import {
*/ */
export async function nextTaskDirect(args, log) { export async function nextTaskDirect(args, log) {
// Destructure expected args // Destructure expected args
const { tasksJsonPath, reportPath } = args; const { tasksJsonPath } = args;
if (!tasksJsonPath) { if (!tasksJsonPath) {
log.error('nextTaskDirect called without tasksJsonPath'); log.error('nextTaskDirect called without tasksJsonPath');
@@ -37,6 +35,9 @@ export async function nextTaskDirect(args, log) {
}; };
} }
// Generate cache key using the provided task path
const cacheKey = `nextTask:${tasksJsonPath}`;
// Define the action function to be executed on cache miss // Define the action function to be executed on cache miss
const coreNextTaskAction = async () => { const coreNextTaskAction = async () => {
try { try {
@@ -58,11 +59,8 @@ export async function nextTaskDirect(args, log) {
}; };
} }
// Read the complexity report
const complexityReport = readComplexityReport(reportPath);
// Find the next task // Find the next task
const nextTask = findNextTask(data.tasks, complexityReport); const nextTask = findNextTask(data.tasks);
if (!nextTask) { if (!nextTask) {
log.info( log.info(
@@ -120,11 +118,18 @@ export async function nextTaskDirect(args, log) {
// Use the caching utility // Use the caching utility
try { try {
const result = await coreNextTaskAction(); const result = await getCachedOrExecute({
log.info(`nextTaskDirect completed.`); cacheKey,
return result; actionFn: coreNextTaskAction,
log
});
log.info(`nextTaskDirect completed. From cache: ${result.fromCache}`);
return result; // Returns { success, data/error, fromCache }
} catch (error) { } catch (error) {
log.error(`Unexpected error during nextTask: ${error.message}`); // Catch unexpected errors from getCachedOrExecute itself
log.error(
`Unexpected error during getCachedOrExecute for nextTask: ${error.message}`
);
return { return {
success: false, success: false,
error: { error: {

View File

@@ -3,10 +3,11 @@
* Direct function implementation for showing task details * Direct function implementation for showing task details
*/ */
import { findTaskById, readJSON } from '../../../../scripts/modules/utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';
import { import {
findTaskById, enableSilentMode,
readComplexityReport, disableSilentMode
readJSON
} from '../../../../scripts/modules/utils.js'; } from '../../../../scripts/modules/utils.js';
import { findTasksJsonPath } from '../utils/path-utils.js'; import { findTasksJsonPath } from '../utils/path-utils.js';
@@ -16,7 +17,6 @@ import { findTasksJsonPath } from '../utils/path-utils.js';
* @param {Object} args - Command arguments. * @param {Object} args - Command arguments.
* @param {string} args.id - Task ID to show. * @param {string} args.id - Task ID to show.
* @param {string} [args.file] - Optional path to the tasks file (passed to findTasksJsonPath). * @param {string} [args.file] - Optional path to the tasks file (passed to findTasksJsonPath).
* @param {string} args.reportPath - Explicit path to the complexity report file.
* @param {string} [args.status] - Optional status to filter subtasks by. * @param {string} [args.status] - Optional status to filter subtasks by.
* @param {string} args.projectRoot - Absolute path to the project root directory (already normalized by tool). * @param {string} args.projectRoot - Absolute path to the project root directory (already normalized by tool).
* @param {Object} log - Logger object. * @param {Object} log - Logger object.
@@ -27,7 +27,7 @@ export async function showTaskDirect(args, log) {
// Destructure session from context if needed later, otherwise ignore // Destructure session from context if needed later, otherwise ignore
// const { session } = context; // const { session } = context;
// Destructure projectRoot and other args. projectRoot is assumed normalized. // Destructure projectRoot and other args. projectRoot is assumed normalized.
const { id, file, reportPath, status, projectRoot } = args; const { id, file, status, projectRoot } = args;
log.info( log.info(
`Showing task direct function. ID: ${id}, File: ${file}, Status Filter: ${status}, ProjectRoot: ${projectRoot}` `Showing task direct function. ID: ${id}, File: ${file}, Status Filter: ${status}, ProjectRoot: ${projectRoot}`
@@ -64,12 +64,9 @@ export async function showTaskDirect(args, log) {
}; };
} }
const complexityReport = readComplexityReport(reportPath);
const { task, originalSubtaskCount } = findTaskById( const { task, originalSubtaskCount } = findTaskById(
tasksData.tasks, tasksData.tasks,
id, id,
complexityReport,
status status
); );

View File

@@ -339,49 +339,6 @@ export function findPRDDocumentPath(projectRoot, explicitPath, log) {
return null; return null;
} }
export function findComplexityReportPath(projectRoot, explicitPath, log) {
// If explicit path is provided, check if it exists
if (explicitPath) {
const fullPath = path.isAbsolute(explicitPath)
? explicitPath
: path.resolve(projectRoot, explicitPath);
if (fs.existsSync(fullPath)) {
log.info(`Using provided PRD document path: ${fullPath}`);
return fullPath;
} else {
log.warn(
`Provided PRD document path not found: ${fullPath}, will search for alternatives`
);
}
}
// Common locations and file patterns for PRD documents
const commonLocations = [
'', // Project root
'scripts/'
];
const commonFileNames = [
'complexity-report.json',
'task-complexity-report.json'
];
// Check all possible combinations
for (const location of commonLocations) {
for (const fileName of commonFileNames) {
const potentialPath = path.join(projectRoot, location, fileName);
if (fs.existsSync(potentialPath)) {
log.info(`Found PRD document at: ${potentialPath}`);
return potentialPath;
}
}
}
log.warn(`No PRD document found in common locations within ${projectRoot}`);
return null;
}
/** /**
* Resolves the tasks output directory path * Resolves the tasks output directory path
* @param {string} projectRoot - The project root directory * @param {string} projectRoot - The project root directory

View File

@@ -10,10 +10,7 @@ import {
withNormalizedProjectRoot withNormalizedProjectRoot
} from './utils.js'; } from './utils.js';
import { showTaskDirect } from '../core/task-master-core.js'; import { showTaskDirect } from '../core/task-master-core.js';
import { import { findTasksJsonPath } from '../core/utils/path-utils.js';
findTasksJsonPath,
findComplexityReportPath
} from '../core/utils/path-utils.js';
/** /**
* Custom processor function that removes allTasks from the response * Custom processor function that removes allTasks from the response
@@ -53,12 +50,6 @@ export function registerShowTaskTool(server) {
.string() .string()
.optional() .optional()
.describe('Path to the tasks file relative to project root'), .describe('Path to the tasks file relative to project root'),
complexityReport: z
.string()
.optional()
.describe(
'Path to the complexity report file (relative to project root or absolute)'
),
projectRoot: z projectRoot: z
.string() .string()
.optional() .optional()
@@ -90,22 +81,9 @@ export function registerShowTaskTool(server) {
} }
// Call the direct function, passing the normalized projectRoot // Call the direct function, passing the normalized projectRoot
// Resolve the path to complexity report
let complexityReportPath;
try {
complexityReportPath = findComplexityReportPath(
projectRoot,
args.complexityReport,
log
);
} catch (error) {
log.error(`Error finding complexity report: ${error.message}`);
}
const result = await showTaskDirect( const result = await showTaskDirect(
{ {
tasksJsonPath: tasksJsonPath, tasksJsonPath: tasksJsonPath,
reportPath: complexityReportPath,
// Pass other relevant args
id: id, id: id,
status: status, status: status,
projectRoot: projectRoot projectRoot: projectRoot

View File

@@ -10,10 +10,7 @@ import {
withNormalizedProjectRoot withNormalizedProjectRoot
} from './utils.js'; } from './utils.js';
import { listTasksDirect } from '../core/task-master-core.js'; import { listTasksDirect } from '../core/task-master-core.js';
import { import { findTasksJsonPath } from '../core/utils/path-utils.js';
findTasksJsonPath,
findComplexityReportPath
} from '../core/utils/path-utils.js';
/** /**
* Register the getTasks tool with the MCP server * Register the getTasks tool with the MCP server
@@ -41,12 +38,6 @@ export function registerListTasksTool(server) {
.describe( .describe(
'Path to the tasks file (relative to project root or absolute)' 'Path to the tasks file (relative to project root or absolute)'
), ),
complexityReport: z
.string()
.optional()
.describe(
'Path to the complexity report file (relative to project root or absolute)'
),
projectRoot: z projectRoot: z
.string() .string()
.describe('The directory of the project. Must be an absolute path.') .describe('The directory of the project. Must be an absolute path.')
@@ -69,23 +60,11 @@ export function registerListTasksTool(server) {
); );
} }
// Resolve the path to complexity report
let complexityReportPath;
try {
complexityReportPath = findComplexityReportPath(
args.projectRoot,
args.complexityReport,
log
);
} catch (error) {
log.error(`Error finding complexity report: ${error.message}`);
}
const result = await listTasksDirect( const result = await listTasksDirect(
{ {
tasksJsonPath: tasksJsonPath, tasksJsonPath: tasksJsonPath,
status: args.status, status: args.status,
withSubtasks: args.withSubtasks, withSubtasks: args.withSubtasks
reportPath: complexityReportPath
}, },
log log
); );

View File

@@ -10,10 +10,7 @@ import {
withNormalizedProjectRoot withNormalizedProjectRoot
} from './utils.js'; } from './utils.js';
import { nextTaskDirect } from '../core/task-master-core.js'; import { nextTaskDirect } from '../core/task-master-core.js';
import { import { findTasksJsonPath } from '../core/utils/path-utils.js';
findTasksJsonPath,
findComplexityReportPath
} from '../core/utils/path-utils.js';
/** /**
* Register the next-task tool with the MCP server * Register the next-task tool with the MCP server
@@ -26,12 +23,6 @@ export function registerNextTaskTool(server) {
'Find the next task to work on based on dependencies and status', 'Find the next task to work on based on dependencies and status',
parameters: z.object({ parameters: z.object({
file: z.string().optional().describe('Absolute path to the tasks file'), file: z.string().optional().describe('Absolute path to the tasks file'),
complexityReport: z
.string()
.optional()
.describe(
'Path to the complexity report file (relative to project root or absolute)'
),
projectRoot: z projectRoot: z
.string() .string()
.describe('The directory of the project. Must be an absolute path.') .describe('The directory of the project. Must be an absolute path.')
@@ -54,21 +45,9 @@ export function registerNextTaskTool(server) {
); );
} }
// Resolve the path to complexity report
let complexityReportPath;
try {
complexityReportPath = findComplexityReportPath(
args.projectRoot,
args.complexityReport,
log
);
} catch (error) {
log.error(`Error finding complexity report: ${error.message}`);
}
const result = await nextTaskDirect( const result = await nextTaskDirect(
{ {
tasksJsonPath: tasksJsonPath, tasksJsonPath: tasksJsonPath
reportPath: complexityReportPath
}, },
log log
); );

View File

@@ -11,7 +11,6 @@ import {
 } from './utils.js';
 import { setTaskStatusDirect } from '../core/task-master-core.js';
 import { findTasksJsonPath } from '../core/utils/path-utils.js';
-import { TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js';

 /**
  * Register the setTaskStatus tool with the MCP server
@@ -28,7 +27,7 @@ export function registerSetTaskStatusTool(server) {
         "Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated to update multiple tasks/subtasks at once."
       ),
     status: z
-      .enum(TASK_STATUS_OPTIONS)
+      .string()
       .describe(
         "New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'."
       ),
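
Several hunks in this compare drop imports of TASK_STATUS_OPTIONS and isValidTaskStatus from src/constants/task-status.js. That file itself is not part of the diff; judging only from the status values quoted in the tool descriptions above, a plausible sketch of what is being un-imported is shown below (hypothetical reconstruction, not the actual module):

  // Hypothetical sketch of src/constants/task-status.js, inferred from this diff.
  export const TASK_STATUS_OPTIONS = [
    'pending',
    'done',
    'in-progress',
    'review',
    'deferred',
    'cancelled'
  ];

  // True when the given status is one of the recognized options
  export function isValidTaskStatus(status) {
    return TASK_STATUS_OPTIONS.includes(status);
  }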

package-lock.json generated
View File

@@ -1,12 +1,12 @@
 {
   "name": "task-master-ai",
-  "version": "0.13.2",
+  "version": "0.12",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "task-master-ai",
-      "version": "0.13.2",
+      "version": "0.12",
       "license": "MIT WITH Commons-Clause",
       "dependencies": {
         "@ai-sdk/anthropic": "^1.2.10",

View File

@@ -14,8 +14,7 @@ import {
getResearchModelId, getResearchModelId,
getFallbackProvider, getFallbackProvider,
getFallbackModelId, getFallbackModelId,
getParametersForRole, getParametersForRole
getBaseUrlForRole
} from './config-manager.js'; } from './config-manager.js';
import { log, resolveEnvVariable, findProjectRoot } from './utils.js'; import { log, resolveEnvVariable, findProjectRoot } from './utils.js';
@@ -285,13 +284,7 @@ async function _unifiedServiceRunner(serviceType, params) {
'AI service call failed for all configured roles.'; 'AI service call failed for all configured roles.';
for (const currentRole of sequence) { for (const currentRole of sequence) {
let providerName, let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn;
modelId,
apiKey,
roleParams,
providerFnSet,
providerApiFn,
baseUrl;
try { try {
log('info', `New AI service call with role: ${currentRole}`); log('info', `New AI service call with role: ${currentRole}`);
@@ -332,7 +325,6 @@ async function _unifiedServiceRunner(serviceType, params) {
// Pass effectiveProjectRoot to getParametersForRole // Pass effectiveProjectRoot to getParametersForRole
roleParams = getParametersForRole(currentRole, effectiveProjectRoot); roleParams = getParametersForRole(currentRole, effectiveProjectRoot);
baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot);
// 2. Get Provider Function Set // 2. Get Provider Function Set
providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()]; providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()];
@@ -409,7 +401,6 @@ async function _unifiedServiceRunner(serviceType, params) {
maxTokens: roleParams.maxTokens, maxTokens: roleParams.maxTokens,
temperature: roleParams.temperature, temperature: roleParams.temperature,
messages, messages,
baseUrl,
...(serviceType === 'generateObject' && { schema, objectName }), ...(serviceType === 'generateObject' && { schema, objectName }),
...restApiParams ...restApiParams
}; };

View File

@@ -73,10 +73,6 @@ import {
getApiKeyStatusReport getApiKeyStatusReport
} from './task-manager/models.js'; } from './task-manager/models.js';
import { findProjectRoot } from './utils.js'; import { findProjectRoot } from './utils.js';
import {
isValidTaskStatus,
TASK_STATUS_OPTIONS
} from '../../src/constants/task-status.js';
import { getTaskMasterVersion } from '../../src/utils/getVersion.js'; import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
/** /**
* Runs the interactive setup process for model configuration. * Runs the interactive setup process for model configuration.
@@ -1037,7 +1033,7 @@ function registerCommands(programInstance) {
) )
.option( .option(
'-s, --status <status>', '-s, --status <status>',
`New status (one of: ${TASK_STATUS_OPTIONS.join(', ')})` 'New status (todo, in-progress, review, done)'
) )
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json') .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
.action(async (options) => { .action(async (options) => {
@@ -1050,16 +1046,6 @@ function registerCommands(programInstance) {
process.exit(1); process.exit(1);
} }
if (!isValidTaskStatus(status)) {
console.error(
chalk.red(
`Error: Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
)
);
process.exit(1);
}
console.log( console.log(
chalk.blue(`Setting status of task(s) ${taskId} to: ${status}`) chalk.blue(`Setting status of task(s) ${taskId} to: ${status}`)
); );
@@ -1072,16 +1058,10 @@ function registerCommands(programInstance) {
.command('list') .command('list')
.description('List all tasks') .description('List all tasks')
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json') .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
.option(
'-r, --report <report>',
'Path to the complexity report file',
'scripts/task-complexity-report.json'
)
.option('-s, --status <status>', 'Filter by status') .option('-s, --status <status>', 'Filter by status')
.option('--with-subtasks', 'Show subtasks for each task') .option('--with-subtasks', 'Show subtasks for each task')
.action(async (options) => { .action(async (options) => {
const tasksPath = options.file; const tasksPath = options.file;
const reportPath = options.report;
const statusFilter = options.status; const statusFilter = options.status;
const withSubtasks = options.withSubtasks || false; const withSubtasks = options.withSubtasks || false;
@@ -1093,7 +1073,7 @@ function registerCommands(programInstance) {
console.log(chalk.blue('Including subtasks in listing')); console.log(chalk.blue('Including subtasks in listing'));
} }
await listTasks(tasksPath, statusFilter, reportPath, withSubtasks); await listTasks(tasksPath, statusFilter, withSubtasks);
}); });
// expand command // expand command
@@ -1399,15 +1379,9 @@ function registerCommands(programInstance) {
`Show the next task to work on based on dependencies and status${chalk.reset('')}` `Show the next task to work on based on dependencies and status${chalk.reset('')}`
) )
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json') .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
.option(
'-r, --report <report>',
'Path to the complexity report file',
'scripts/task-complexity-report.json'
)
.action(async (options) => { .action(async (options) => {
const tasksPath = options.file; const tasksPath = options.file;
const reportPath = options.report; await displayNextTask(tasksPath);
await displayNextTask(tasksPath, reportPath);
}); });
// show command // show command
@@ -1420,11 +1394,6 @@ function registerCommands(programInstance) {
.option('-i, --id <id>', 'Task ID to show') .option('-i, --id <id>', 'Task ID to show')
.option('-s, --status <status>', 'Filter subtasks by status') // ADDED status option .option('-s, --status <status>', 'Filter subtasks by status') // ADDED status option
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json') .option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
.option(
'-r, --report <report>',
'Path to the complexity report file',
'scripts/task-complexity-report.json'
)
.action(async (taskId, options) => { .action(async (taskId, options) => {
const idArg = taskId || options.id; const idArg = taskId || options.id;
const statusFilter = options.status; // ADDED: Capture status filter const statusFilter = options.status; // ADDED: Capture status filter
@@ -1435,9 +1404,8 @@ function registerCommands(programInstance) {
} }
const tasksPath = options.file; const tasksPath = options.file;
const reportPath = options.report;
// PASS statusFilter to the display function // PASS statusFilter to the display function
await displayTaskById(tasksPath, idArg, reportPath, statusFilter); await displayTaskById(tasksPath, idArg, statusFilter);
}); });
// add-dependency command // add-dependency command

View File

@@ -677,13 +677,6 @@ function getAllProviders() {
   return Object.keys(MODEL_MAP || {});
 }

-function getBaseUrlForRole(role, explicitRoot = null) {
-  const roleConfig = getModelConfigForRole(role, explicitRoot);
-  return roleConfig && typeof roleConfig.baseUrl === 'string'
-    ? roleConfig.baseUrl
-    : undefined;
-}

 export {
   // Core config access
   getConfig,
@@ -711,7 +704,6 @@ export {
   getFallbackModelId,
   getFallbackMaxTokens,
   getFallbackTemperature,
-  getBaseUrlForRole,
   // Global setting getters (No env var overrides)
   getLogLevel,

View File

@@ -23,7 +23,7 @@ import updateSubtaskById from './task-manager/update-subtask-by-id.js';
 import removeTask from './task-manager/remove-task.js';
 import taskExists from './task-manager/task-exists.js';
 import isTaskDependentOn from './task-manager/is-task-dependent.js';
-import { readComplexityReport } from './utils.js';

 // Export task manager functions
 export {
   parsePRD,
@@ -45,6 +45,5 @@ export {
   removeTask,
   findTaskById,
   taskExists,
-  isTaskDependentOn,
-  readComplexityReport
+  isTaskDependentOn
 };

View File

@@ -1,6 +1,3 @@
import { log } from '../utils.js';
import { addComplexityToTask } from '../utils.js';
/** /**
* Return the next work item: * Return the next work item:
* • Prefer an eligible SUBTASK that belongs to any parent task * • Prefer an eligible SUBTASK that belongs to any parent task
@@ -18,10 +15,9 @@ import { addComplexityToTask } from '../utils.js';
* ─ parentId → number (present only when it's a subtask) * ─ parentId → number (present only when it's a subtask)
* *
* @param {Object[]} tasks full array of top-level tasks, each may contain .subtasks[] * @param {Object[]} tasks full array of top-level tasks, each may contain .subtasks[]
* @param {Object} [complexityReport=null] - Optional complexity report object
* @returns {Object|null} next work item or null if nothing is eligible * @returns {Object|null} next work item or null if nothing is eligible
*/ */
function findNextTask(tasks, complexityReport = null) { function findNextTask(tasks) {
// ---------- helpers ---------------------------------------------------- // ---------- helpers ----------------------------------------------------
const priorityValues = { high: 3, medium: 2, low: 1 }; const priorityValues = { high: 3, medium: 2, low: 1 };
@@ -95,14 +91,7 @@ function findNextTask(tasks, complexityReport = null) {
if (aPar !== bPar) return aPar - bPar; if (aPar !== bPar) return aPar - bPar;
return aSub - bSub; return aSub - bSub;
}); });
const nextTask = candidateSubtasks[0]; return candidateSubtasks[0];
// Add complexity to the task before returning
if (nextTask && complexityReport) {
addComplexityToTask(nextTask, complexityReport);
}
return nextTask;
} }
// ---------- 2) fall back to top-level tasks (original logic) ------------ // ---------- 2) fall back to top-level tasks (original logic) ------------
@@ -127,11 +116,6 @@ function findNextTask(tasks, complexityReport = null) {
return a.id - b.id; return a.id - b.id;
})[0]; })[0];
// Add complexity to the task before returning
if (nextTask && complexityReport) {
addComplexityToTask(nextTask, complexityReport);
}
return nextTask; return nextTask;
} }

View File

@@ -2,20 +2,13 @@ import chalk from 'chalk';
import boxen from 'boxen'; import boxen from 'boxen';
import Table from 'cli-table3'; import Table from 'cli-table3';
import { import { log, readJSON, truncate } from '../utils.js';
log,
readJSON,
truncate,
readComplexityReport,
addComplexityToTask
} from '../utils.js';
import findNextTask from './find-next-task.js'; import findNextTask from './find-next-task.js';
import { import {
displayBanner, displayBanner,
getStatusWithColor, getStatusWithColor,
formatDependenciesWithStatus, formatDependenciesWithStatus,
getComplexityWithColor,
createProgressBar createProgressBar
} from '../ui.js'; } from '../ui.js';
@@ -23,7 +16,6 @@ import {
* List all tasks * List all tasks
* @param {string} tasksPath - Path to the tasks.json file * @param {string} tasksPath - Path to the tasks.json file
* @param {string} statusFilter - Filter by status * @param {string} statusFilter - Filter by status
* @param {string} reportPath - Path to the complexity report
* @param {boolean} withSubtasks - Whether to show subtasks * @param {boolean} withSubtasks - Whether to show subtasks
* @param {string} outputFormat - Output format (text or json) * @param {string} outputFormat - Output format (text or json)
* @returns {Object} - Task list result for json format * @returns {Object} - Task list result for json format
@@ -31,7 +23,6 @@ import {
function listTasks( function listTasks(
tasksPath, tasksPath,
statusFilter, statusFilter,
reportPath = null,
withSubtasks = false, withSubtasks = false,
outputFormat = 'text' outputFormat = 'text'
) { ) {
@@ -46,13 +37,6 @@ function listTasks(
throw new Error(`No valid tasks found in ${tasksPath}`); throw new Error(`No valid tasks found in ${tasksPath}`);
} }
// Add complexity scores to tasks if report exists
const complexityReport = readComplexityReport(reportPath);
// Apply complexity scores to tasks
if (complexityReport && complexityReport.complexityAnalysis) {
data.tasks.forEach((task) => addComplexityToTask(task, complexityReport));
}
// Filter tasks by status if specified // Filter tasks by status if specified
const filteredTasks = const filteredTasks =
statusFilter && statusFilter.toLowerCase() !== 'all' // <-- Added check for 'all' statusFilter && statusFilter.toLowerCase() !== 'all' // <-- Added check for 'all'
@@ -273,8 +257,8 @@ function listTasks(
); );
const avgDependenciesPerTask = totalDependencies / data.tasks.length; const avgDependenciesPerTask = totalDependencies / data.tasks.length;
// Find next task to work on, passing the complexity report // Find next task to work on
const nextItem = findNextTask(data.tasks, complexityReport); const nextItem = findNextTask(data.tasks);
// Get terminal width - more reliable method // Get terminal width - more reliable method
let terminalWidth; let terminalWidth;
@@ -317,11 +301,8 @@ function listTasks(
`${chalk.blue('•')} ${chalk.white('Avg dependencies per task:')} ${avgDependenciesPerTask.toFixed(1)}\n\n` + `${chalk.blue('•')} ${chalk.white('Avg dependencies per task:')} ${avgDependenciesPerTask.toFixed(1)}\n\n` +
chalk.cyan.bold('Next Task to Work On:') + chalk.cyan.bold('Next Task to Work On:') +
'\n' + '\n' +
`ID: ${chalk.cyan(nextItem ? nextItem.id : 'N/A')} - ${nextItem ? chalk.white.bold(truncate(nextItem.title, 40)) : chalk.yellow('No task available')} `ID: ${chalk.cyan(nextItem ? nextItem.id : 'N/A')} - ${nextItem ? chalk.white.bold(truncate(nextItem.title, 40)) : chalk.yellow('No task available')}\n` +
` + `Priority: ${nextItem ? chalk.white(nextItem.priority || 'medium') : ''} Dependencies: ${nextItem ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true) : ''}`;
`Priority: ${nextItem ? chalk.white(nextItem.priority || 'medium') : ''} Dependencies: ${nextItem ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : ''}
` +
`Complexity: ${nextItem && nextItem.complexityScore ? getComplexityWithColor(nextItem.complexityScore) : chalk.gray('N/A')}`;
// Calculate width for side-by-side display // Calculate width for side-by-side display
// Box borders, padding take approximately 4 chars on each side // Box borders, padding take approximately 4 chars on each side
@@ -431,16 +412,9 @@ function listTasks(
// Make dependencies column smaller as requested (-20%) // Make dependencies column smaller as requested (-20%)
const depsWidthPct = 20; const depsWidthPct = 20;
const complexityWidthPct = 10;
// Calculate title/description width as remaining space (+20% from dependencies reduction) // Calculate title/description width as remaining space (+20% from dependencies reduction)
const titleWidthPct = const titleWidthPct =
100 - 100 - idWidthPct - statusWidthPct - priorityWidthPct - depsWidthPct;
idWidthPct -
statusWidthPct -
priorityWidthPct -
depsWidthPct -
complexityWidthPct;
// Allow 10 characters for borders and padding // Allow 10 characters for borders and padding
const availableWidth = terminalWidth - 10; const availableWidth = terminalWidth - 10;
@@ -450,9 +424,6 @@ function listTasks(
const statusWidth = Math.floor(availableWidth * (statusWidthPct / 100)); const statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));
const priorityWidth = Math.floor(availableWidth * (priorityWidthPct / 100)); const priorityWidth = Math.floor(availableWidth * (priorityWidthPct / 100));
const depsWidth = Math.floor(availableWidth * (depsWidthPct / 100)); const depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));
const complexityWidth = Math.floor(
availableWidth * (complexityWidthPct / 100)
);
const titleWidth = Math.floor(availableWidth * (titleWidthPct / 100)); const titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));
// Create a table with correct borders and spacing // Create a table with correct borders and spacing
@@ -462,17 +433,9 @@ function listTasks(
chalk.cyan.bold('Title'), chalk.cyan.bold('Title'),
chalk.cyan.bold('Status'), chalk.cyan.bold('Status'),
chalk.cyan.bold('Priority'), chalk.cyan.bold('Priority'),
chalk.cyan.bold('Dependencies'), chalk.cyan.bold('Dependencies')
chalk.cyan.bold('Complexity')
],
colWidths: [
idWidth,
titleWidth,
statusWidth,
priorityWidth,
depsWidth,
complexityWidth // Added complexity column width
], ],
colWidths: [idWidth, titleWidth, statusWidth, priorityWidth, depsWidth],
style: { style: {
head: [], // No special styling for header head: [], // No special styling for header
border: [], // No special styling for border border: [], // No special styling for border
@@ -491,8 +454,7 @@ function listTasks(
depText = formatDependenciesWithStatus( depText = formatDependenciesWithStatus(
task.dependencies, task.dependencies,
data.tasks, data.tasks,
true, true
complexityReport
); );
} else { } else {
depText = chalk.gray('None'); depText = chalk.gray('None');
@@ -518,10 +480,7 @@ function listTasks(
truncate(cleanTitle, titleWidth - 3), truncate(cleanTitle, titleWidth - 3),
status, status,
priorityColor(truncate(task.priority || 'medium', priorityWidth - 2)), priorityColor(truncate(task.priority || 'medium', priorityWidth - 2)),
depText, depText // No truncation for dependencies
task.complexityScore
? getComplexityWithColor(task.complexityScore)
: chalk.gray('N/A')
]); ]);
// Add subtasks if requested // Add subtasks if requested
@@ -557,8 +516,6 @@ function listTasks(
// Default to regular task dependency // Default to regular task dependency
const depTask = data.tasks.find((t) => t.id === depId); const depTask = data.tasks.find((t) => t.id === depId);
if (depTask) { if (depTask) {
// Add complexity to depTask before checking status
addComplexityToTask(depTask, complexityReport);
const isDone = const isDone =
depTask.status === 'done' || depTask.status === 'completed'; depTask.status === 'done' || depTask.status === 'completed';
const isInProgress = depTask.status === 'in-progress'; const isInProgress = depTask.status === 'in-progress';
@@ -584,10 +541,7 @@ function listTasks(
chalk.dim(`└─ ${truncate(subtask.title, titleWidth - 5)}`), chalk.dim(`└─ ${truncate(subtask.title, titleWidth - 5)}`),
getStatusWithColor(subtask.status, true), getStatusWithColor(subtask.status, true),
chalk.dim('-'), chalk.dim('-'),
subtaskDepText, subtaskDepText // No truncation for dependencies
subtask.complexityScore
? chalk.gray(`${subtask.complexityScore}`)
: chalk.gray('N/A')
]); ]);
}); });
} }
@@ -643,8 +597,6 @@ function listTasks(
subtasksSection = `\n\n${chalk.white.bold('Subtasks:')}\n`; subtasksSection = `\n\n${chalk.white.bold('Subtasks:')}\n`;
subtasksSection += parentTaskForSubtasks.subtasks subtasksSection += parentTaskForSubtasks.subtasks
.map((subtask) => { .map((subtask) => {
// Add complexity to subtask before display
addComplexityToTask(subtask, complexityReport);
// Using a more simplified format for subtask status display // Using a more simplified format for subtask status display
const status = subtask.status || 'pending'; const status = subtask.status || 'pending';
const statusColors = { const statusColors = {
@@ -673,8 +625,8 @@ function listTasks(
'\n\n' + '\n\n' +
// Use nextItem.priority, nextItem.status, nextItem.dependencies // Use nextItem.priority, nextItem.status, nextItem.dependencies
`${chalk.white('Priority:')} ${priorityColors[nextItem.priority || 'medium'](nextItem.priority || 'medium')} ${chalk.white('Status:')} ${getStatusWithColor(nextItem.status, true)}\n` + `${chalk.white('Priority:')} ${priorityColors[nextItem.priority || 'medium'](nextItem.priority || 'medium')} ${chalk.white('Status:')} ${getStatusWithColor(nextItem.status, true)}\n` +
`${chalk.white('Dependencies:')} ${nextItem.dependencies && nextItem.dependencies.length > 0 ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : chalk.gray('None')}\n\n` + `${chalk.white('Dependencies:')} ${nextItem.dependencies && nextItem.dependencies.length > 0 ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true) : chalk.gray('None')}\n\n` +
// Use nextTask.description (Note: findNextTask doesn't return description, need to fetch original task/subtask for this) // Use nextItem.description (Note: findNextTask doesn't return description, need to fetch original task/subtask for this)
// *** Fetching original item for description and details *** // *** Fetching original item for description and details ***
`${chalk.white('Description:')} ${getWorkItemDescription(nextItem, data.tasks)}` + `${chalk.white('Description:')} ${getWorkItemDescription(nextItem, data.tasks)}` +
subtasksSection + // <-- Subtasks are handled above now subtasksSection + // <-- Subtasks are handled above now

View File

@@ -8,10 +8,6 @@ import { validateTaskDependencies } from '../dependency-manager.js';
 import { getDebugFlag } from '../config-manager.js';
 import updateSingleTaskStatus from './update-single-task-status.js';
 import generateTaskFiles from './generate-task-files.js';
-import {
-  isValidTaskStatus,
-  TASK_STATUS_OPTIONS
-} from '../../../src/constants/task-status.js';

 /**
  * Set the status of a task
@@ -23,11 +19,6 @@ import {
  */
 async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {
   try {
-    if (!isValidTaskStatus(newStatus)) {
-      throw new Error(
-        `Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
-      );
-    }

     // Determine if we're in MCP mode by checking for mcpLog
     const isMcpMode = !!options?.mcpLog;

View File

@@ -1,7 +1,6 @@
 import chalk from 'chalk';
 import { log } from '../utils.js';
-import { isValidTaskStatus } from '../../../src/constants/task-status.js';

 /**
  * Update the status of a single task
@@ -18,12 +17,6 @@ async function updateSingleTaskStatus(
   data,
   showUi = true
 ) {
-  if (!isValidTaskStatus(newStatus)) {
-    throw new Error(
-      `Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
-    );
-  }
   // Check if it's a subtask (e.g., "1.2")
   if (taskIdInput.includes('.')) {
     const [parentId, subtaskId] = taskIdInput

View File

@@ -17,13 +17,8 @@ import {
isSilentMode isSilentMode
} from './utils.js'; } from './utils.js';
import fs from 'fs'; import fs from 'fs';
import { import { findNextTask, analyzeTaskComplexity } from './task-manager.js';
findNextTask,
analyzeTaskComplexity,
readComplexityReport
} from './task-manager.js';
import { getProjectName, getDefaultSubtasks } from './config-manager.js'; import { getProjectName, getDefaultSubtasks } from './config-manager.js';
import { TASK_STATUS_OPTIONS } from '../../src/constants/task-status.js';
import { getTaskMasterVersion } from '../../src/utils/getVersion.js'; import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
// Create a color gradient for the banner // Create a color gradient for the banner
@@ -268,14 +263,12 @@ function getStatusWithColor(status, forTable = false) {
* @param {Array} dependencies - Array of dependency IDs * @param {Array} dependencies - Array of dependency IDs
* @param {Array} allTasks - Array of all tasks * @param {Array} allTasks - Array of all tasks
* @param {boolean} forConsole - Whether the output is for console display * @param {boolean} forConsole - Whether the output is for console display
* @param {Object|null} complexityReport - Optional pre-loaded complexity report
* @returns {string} Formatted dependencies string * @returns {string} Formatted dependencies string
*/ */
function formatDependenciesWithStatus( function formatDependenciesWithStatus(
dependencies, dependencies,
allTasks, allTasks,
forConsole = false, forConsole = false
complexityReport = null // Add complexityReport parameter
) { ) {
if ( if (
!dependencies || !dependencies ||
@@ -339,11 +332,7 @@ function formatDependenciesWithStatus(
typeof depId === 'string' ? parseInt(depId, 10) : depId; typeof depId === 'string' ? parseInt(depId, 10) : depId;
// Look up the task using the numeric ID // Look up the task using the numeric ID
const depTaskResult = findTaskById( const depTaskResult = findTaskById(allTasks, numericDepId);
allTasks,
numericDepId,
complexityReport
);
const depTask = depTaskResult.task; // Access the task object from the result const depTask = depTaskResult.task; // Access the task object from the result
if (!depTask) { if (!depTask) {
@@ -459,7 +448,7 @@ function displayHelp() {
{ {
name: 'set-status', name: 'set-status',
args: '--id=<id> --status=<status>', args: '--id=<id> --status=<status>',
desc: `Update task status (${TASK_STATUS_OPTIONS.join(', ')})` desc: 'Update task status (done, pending, etc.)'
}, },
{ {
name: 'update', name: 'update',
@@ -762,7 +751,7 @@ function truncateString(str, maxLength) {
* Display the next task to work on * Display the next task to work on
* @param {string} tasksPath - Path to the tasks.json file * @param {string} tasksPath - Path to the tasks.json file
*/ */
async function displayNextTask(tasksPath, complexityReportPath = null) { async function displayNextTask(tasksPath) {
displayBanner(); displayBanner();
// Read the tasks file // Read the tasks file
@@ -772,11 +761,8 @@ async function displayNextTask(tasksPath, complexityReportPath = null) {
process.exit(1); process.exit(1);
} }
// Read complexity report once
const complexityReport = readComplexityReport(complexityReportPath);
// Find the next task // Find the next task
const nextTask = findNextTask(data.tasks, complexityReport); const nextTask = findNextTask(data.tasks);
if (!nextTask) { if (!nextTask) {
console.log( console.log(
@@ -837,18 +823,7 @@ async function displayNextTask(tasksPath, complexityReportPath = null) {
], ],
[ [
chalk.cyan.bold('Dependencies:'), chalk.cyan.bold('Dependencies:'),
formatDependenciesWithStatus( formatDependenciesWithStatus(nextTask.dependencies, data.tasks, true)
nextTask.dependencies,
data.tasks,
true,
complexityReport
)
],
[
chalk.cyan.bold('Complexity:'),
nextTask.complexityScore
? getComplexityWithColor(nextTask.complexityScore)
: chalk.gray('N/A')
], ],
[chalk.cyan.bold('Description:'), nextTask.description] [chalk.cyan.bold('Description:'), nextTask.description]
); );
@@ -1016,12 +991,7 @@ async function displayNextTask(tasksPath, complexityReportPath = null) {
* @param {string|number} taskId - The ID of the task to display * @param {string|number} taskId - The ID of the task to display
* @param {string} [statusFilter] - Optional status to filter subtasks by * @param {string} [statusFilter] - Optional status to filter subtasks by
*/ */
async function displayTaskById( async function displayTaskById(tasksPath, taskId, statusFilter = null) {
tasksPath,
taskId,
complexityReportPath = null,
statusFilter = null
) {
displayBanner(); displayBanner();
// Read the tasks file // Read the tasks file
@@ -1031,15 +1001,11 @@ async function displayTaskById(
process.exit(1); process.exit(1);
} }
// Read complexity report once
const complexityReport = readComplexityReport(complexityReportPath);
// Find the task by ID, applying the status filter if provided // Find the task by ID, applying the status filter if provided
// Returns { task, originalSubtaskCount, originalSubtasks } // Returns { task, originalSubtaskCount, originalSubtasks }
const { task, originalSubtaskCount, originalSubtasks } = findTaskById( const { task, originalSubtaskCount, originalSubtasks } = findTaskById(
data.tasks, data.tasks,
taskId, taskId,
complexityReport,
statusFilter statusFilter
); );
@@ -1094,12 +1060,6 @@ async function displayTaskById(
chalk.cyan.bold('Status:'), chalk.cyan.bold('Status:'),
getStatusWithColor(task.status || 'pending', true) getStatusWithColor(task.status || 'pending', true)
], ],
[
chalk.cyan.bold('Complexity:'),
task.complexityScore
? getComplexityWithColor(task.complexityScore)
: chalk.gray('N/A')
],
[ [
chalk.cyan.bold('Description:'), chalk.cyan.bold('Description:'),
task.description || 'No description provided.' task.description || 'No description provided.'
@@ -1178,18 +1138,7 @@ async function displayTaskById(
[chalk.cyan.bold('Priority:'), priorityColor(task.priority || 'medium')], [chalk.cyan.bold('Priority:'), priorityColor(task.priority || 'medium')],
[ [
chalk.cyan.bold('Dependencies:'), chalk.cyan.bold('Dependencies:'),
formatDependenciesWithStatus( formatDependenciesWithStatus(task.dependencies, data.tasks, true)
task.dependencies,
data.tasks,
true,
complexityReport
)
],
[
chalk.cyan.bold('Complexity:'),
task.complexityScore
? getComplexityWithColor(task.complexityScore)
: chalk.gray('N/A')
], ],
[chalk.cyan.bold('Description:'), task.description] [chalk.cyan.bold('Description:'), task.description]
); );

View File

@@ -275,22 +275,6 @@ function findTaskInComplexityReport(report, taskId) {
return report.complexityAnalysis.find((task) => task.taskId === taskId); return report.complexityAnalysis.find((task) => task.taskId === taskId);
} }
function addComplexityToTask(task, complexityReport) {
let taskId;
if (task.isSubtask) {
taskId = task.parentTask.id;
} else if (task.parentId) {
taskId = task.parentId;
} else {
taskId = task.id;
}
const taskAnalysis = findTaskInComplexityReport(complexityReport, taskId);
if (taskAnalysis) {
task.complexityScore = taskAnalysis.complexityScore;
}
}
/** /**
* Checks if a task exists in the tasks array * Checks if a task exists in the tasks array
* @param {Array} tasks - The tasks array * @param {Array} tasks - The tasks array
@@ -341,17 +325,10 @@ function formatTaskId(id) {
* Finds a task by ID in the tasks array. Optionally filters subtasks by status. * Finds a task by ID in the tasks array. Optionally filters subtasks by status.
* @param {Array} tasks - The tasks array * @param {Array} tasks - The tasks array
* @param {string|number} taskId - The task ID to find * @param {string|number} taskId - The task ID to find
* @param {Object|null} complexityReport - Optional pre-loaded complexity report
* @returns {Object|null} The task object or null if not found
* @param {string} [statusFilter] - Optional status to filter subtasks by * @param {string} [statusFilter] - Optional status to filter subtasks by
* @returns {{task: Object|null, originalSubtaskCount: number|null}} The task object (potentially with filtered subtasks) and the original subtask count if filtered, or nulls if not found. * @returns {{task: Object|null, originalSubtaskCount: number|null}} The task object (potentially with filtered subtasks) and the original subtask count if filtered, or nulls if not found.
*/ */
function findTaskById( function findTaskById(tasks, taskId, statusFilter = null) {
tasks,
taskId,
complexityReport = null,
statusFilter = null
) {
if (!taskId || !tasks || !Array.isArray(tasks)) { if (!taskId || !tasks || !Array.isArray(tasks)) {
return { task: null, originalSubtaskCount: null }; return { task: null, originalSubtaskCount: null };
} }
@@ -379,17 +356,10 @@ function findTaskById(
subtask.isSubtask = true; subtask.isSubtask = true;
} }
// If we found a task, check for complexity data // Return the found subtask (or null) and null for originalSubtaskCount
if (subtask && complexityReport) {
addComplexityToTask(subtask, complexityReport);
}
return { task: subtask || null, originalSubtaskCount: null }; return { task: subtask || null, originalSubtaskCount: null };
} }
let taskResult = null;
let originalSubtaskCount = null;
// Find the main task // Find the main task
const id = parseInt(taskId, 10); const id = parseInt(taskId, 10);
const task = tasks.find((t) => t.id === id) || null; const task = tasks.find((t) => t.id === id) || null;
@@ -399,8 +369,6 @@ function findTaskById(
return { task: null, originalSubtaskCount: null }; return { task: null, originalSubtaskCount: null };
} }
taskResult = task;
// If task found and statusFilter provided, filter its subtasks // If task found and statusFilter provided, filter its subtasks
if (statusFilter && task.subtasks && Array.isArray(task.subtasks)) { if (statusFilter && task.subtasks && Array.isArray(task.subtasks)) {
const originalSubtaskCount = task.subtasks.length; const originalSubtaskCount = task.subtasks.length;
@@ -411,18 +379,12 @@ function findTaskById(
subtask.status && subtask.status &&
subtask.status.toLowerCase() === statusFilter.toLowerCase() subtask.status.toLowerCase() === statusFilter.toLowerCase()
); );
// Return the filtered task and the original count
taskResult = filteredTask; return { task: filteredTask, originalSubtaskCount: originalSubtaskCount };
originalSubtaskCount = originalSubtaskCount;
} }
// If task found and complexityReport provided, add complexity data // Return original task and null count if no filter or no subtasks
if (taskResult && complexityReport) { return { task: task, originalSubtaskCount: null };
addComplexityToTask(taskResult, complexityReport);
}
// Return the found task and original subtask count
return { task: taskResult, originalSubtaskCount };
} }
/** /**
@@ -562,11 +524,10 @@ export {
findCycles, findCycles,
toKebabCase, toKebabCase,
detectCamelCaseFlags, detectCamelCaseFlags,
disableSilentMode,
enableSilentMode, enableSilentMode,
getTaskManager, disableSilentMode,
isSilentMode, isSilentMode,
addComplexityToTask,
resolveEnvVariable, resolveEnvVariable,
getTaskManager,
findProjectRoot findProjectRoot
}; };
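Editor's note: for orientation, a minimal usage sketch of the complexity-aware findTaskById variant shown in this file's diff. The import path and task data are assumptions for illustration, not values taken from the diff.

// Sketch only: assumes the complexity-aware signature
// findTaskById(tasks, taskId, complexityReport, statusFilter) is in effect.
import { findTaskById } from './scripts/modules/utils.js';

const tasks = [{ id: 2, title: 'Create Core Functionality', status: 'pending' }];
const complexityReport = {
  complexityAnalysis: [{ taskId: 2, complexityScore: 8 }]
};

const { task, originalSubtaskCount } = findTaskById(tasks, 2, complexityReport, null);
console.log(task.complexityScore);   // 8, attached via addComplexityToTask
console.log(originalSubtaskCount);   // null, since no subtask status filter was applied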

View File

@@ -5,7 +5,7 @@
* using the Vercel AI SDK. * using the Vercel AI SDK.
*/ */
import { createAnthropic } from '@ai-sdk/anthropic'; import { createAnthropic } from '@ai-sdk/anthropic';
import { generateText, streamText, generateObject } from 'ai'; import { generateText, streamText, generateObject, streamObject } from 'ai';
import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
// TODO: Implement standardized functions for generateText, streamText, generateObject // TODO: Implement standardized functions for generateText, streamText, generateObject
@@ -17,7 +17,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils is acces
// Remove the global variable and caching logic // Remove the global variable and caching logic
// let anthropicClient; // let anthropicClient;
function getClient(apiKey, baseUrl) { function getClient(apiKey) {
if (!apiKey) { if (!apiKey) {
// In a real scenario, this would use the config resolver. // In a real scenario, this would use the config resolver.
// Throwing error here if key isn't passed for simplicity. // Throwing error here if key isn't passed for simplicity.
@@ -30,12 +30,14 @@ function getClient(apiKey, baseUrl) {
// Create and return a new instance directly with standard version header // Create and return a new instance directly with standard version header
return createAnthropic({ return createAnthropic({
apiKey: apiKey, apiKey: apiKey,
...(baseUrl && { baseURL: baseUrl }), baseURL: 'https://api.anthropic.com/v1',
// Use standard version header instead of beta // Use standard version header instead of beta
headers: { headers: {
'anthropic-beta': 'output-128k-2025-02-19' 'anthropic-beta': 'output-128k-2025-02-19'
} }
}); });
// }
// return anthropicClient;
} }
// --- Standardized Service Function Implementations --- // --- Standardized Service Function Implementations ---
@@ -49,7 +51,6 @@ function getClient(apiKey, baseUrl) {
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]). * @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
* @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.temperature] - Temperature for generation.
* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
* @returns {Promise<string>} The generated text content. * @returns {Promise<string>} The generated text content.
* @throws {Error} If the API call fails. * @throws {Error} If the API call fails.
*/ */
@@ -58,12 +59,11 @@ export async function generateAnthropicText({
modelId, modelId,
messages, messages,
maxTokens, maxTokens,
temperature, temperature
baseUrl
}) { }) {
log('debug', `Generating Anthropic text with model: ${modelId}`); log('debug', `Generating Anthropic text with model: ${modelId}`);
try { try {
const client = getClient(apiKey, baseUrl); const client = getClient(apiKey);
const result = await generateText({ const result = await generateText({
model: client(modelId), model: client(modelId),
messages: messages, messages: messages,
@@ -93,7 +93,6 @@ export async function generateAnthropicText({
* @param {Array<object>} params.messages - The messages array. * @param {Array<object>} params.messages - The messages array.
* @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.temperature] - Temperature for generation.
* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
* @returns {Promise<object>} The full stream result object from the Vercel AI SDK. * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
* @throws {Error} If the API call fails to initiate the stream. * @throws {Error} If the API call fails to initiate the stream.
*/ */
@@ -102,20 +101,20 @@ export async function streamAnthropicText({
modelId, modelId,
messages, messages,
maxTokens, maxTokens,
temperature, temperature
baseUrl
}) { }) {
log('debug', `Streaming Anthropic text with model: ${modelId}`); log('debug', `Streaming Anthropic text with model: ${modelId}`);
try { try {
const client = getClient(apiKey, baseUrl); const client = getClient(apiKey);
// --- DEBUG LOGGING --- >>
log( log(
'debug', 'debug',
'[streamAnthropicText] Parameters received by streamText:', '[streamAnthropicText] Parameters received by streamText:',
JSON.stringify( JSON.stringify(
{ {
modelId: modelId, modelId: modelId, // Log modelId being used
messages: messages, messages: messages, // Log the messages array
maxTokens: maxTokens, maxTokens: maxTokens,
temperature: temperature temperature: temperature
}, },
@@ -123,19 +122,25 @@ export async function streamAnthropicText({
2 2
) )
); );
// --- << DEBUG LOGGING ---
const stream = await streamText({ const stream = await streamText({
model: client(modelId), model: client(modelId),
messages: messages, messages: messages,
maxTokens: maxTokens, maxTokens: maxTokens,
temperature: temperature temperature: temperature
// Beta header moved to client initialization
// TODO: Add other relevant parameters // TODO: Add other relevant parameters
}); });
// *** RETURN THE FULL STREAM OBJECT, NOT JUST stream.textStream *** // *** RETURN THE FULL STREAM OBJECT, NOT JUST stream.textStream ***
return stream; return stream;
} catch (error) { } catch (error) {
log('error', `Anthropic streamText failed: ${error.message}`, error.stack); log(
'error',
`Anthropic streamText failed: ${error.message}`,
error.stack // Log stack trace for more details
);
throw error; throw error;
} }
} }
@@ -155,7 +160,6 @@ export async function streamAnthropicText({
* @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.temperature] - Temperature for generation.
* @param {number} [params.maxRetries] - Max retries for validation/generation. * @param {number} [params.maxRetries] - Max retries for validation/generation.
* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
* @returns {Promise<object>} The generated object matching the schema. * @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If generation or validation fails. * @throws {Error} If generation or validation fails.
*/ */
@@ -167,22 +171,24 @@ export async function generateAnthropicObject({
objectName = 'generated_object', objectName = 'generated_object',
maxTokens, maxTokens,
temperature, temperature,
maxRetries = 3, maxRetries = 3
baseUrl
}) { }) {
log( log(
'debug', 'debug',
`Generating Anthropic object ('${objectName}') with model: ${modelId}` `Generating Anthropic object ('${objectName}') with model: ${modelId}`
); );
try { try {
const client = getClient(apiKey, baseUrl); const client = getClient(apiKey);
// Log basic debug info
log( log(
'debug', 'debug',
`Using maxTokens: ${maxTokens}, temperature: ${temperature}, model: ${modelId}` `Using maxTokens: ${maxTokens}, temperature: ${temperature}, model: ${modelId}`
); );
const result = await generateObject({ const result = await generateObject({
model: client(modelId), model: client(modelId),
mode: 'tool', mode: 'tool', // Anthropic generally uses 'tool' mode for structured output
schema: schema, schema: schema,
messages: messages, messages: messages,
tool: { tool: {
@@ -193,12 +199,14 @@ export async function generateAnthropicObject({
temperature: temperature, temperature: temperature,
maxRetries: maxRetries maxRetries: maxRetries
}); });
log( log(
'debug', 'debug',
`Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}` `Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
); );
return result.object; return result.object;
} catch (error) { } catch (error) {
// Simple error logging
log( log(
'error', 'error',
`Anthropic generateObject ('${objectName}') failed: ${error.message}` `Anthropic generateObject ('${objectName}') failed: ${error.message}`
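Editor's note: a hedged call sketch for the text helper in this file, using the variant that accepts baseUrl. The import path and base URL are assumptions; the model id and token limit mirror the mocked CONFIG later in this diff.

// Sketch only: ANTHROPIC_API_BASE_URL is optional and only honored by the
// baseUrl-aware variant of getClient.
import { generateAnthropicText } from './src/ai-providers/anthropic.js';

const text = await generateAnthropicText({
  apiKey: process.env.ANTHROPIC_API_KEY,
  baseUrl: process.env.ANTHROPIC_API_BASE_URL,
  modelId: 'claude-3-7-sonnet-20250219',
  messages: [{ role: 'user', content: 'Summarize the next pending task in one sentence.' }],
  maxTokens: 64000,
  temperature: 0.2
});
console.log(text);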

View File

@@ -12,16 +12,6 @@ import { log } from '../../scripts/modules/utils.js'; // Import logging utility
const DEFAULT_MODEL = 'gemini-2.0-pro'; // Or a suitable default const DEFAULT_MODEL = 'gemini-2.0-pro'; // Or a suitable default
const DEFAULT_TEMPERATURE = 0.2; // Or a suitable default const DEFAULT_TEMPERATURE = 0.2; // Or a suitable default
function getClient(apiKey, baseUrl) {
if (!apiKey) {
throw new Error('Google API key is required.');
}
return createGoogleGenerativeAI({
apiKey: apiKey,
...(baseUrl && { baseURL: baseUrl })
});
}
/** /**
* Generates text using a Google AI model. * Generates text using a Google AI model.
* *
@@ -39,8 +29,7 @@ async function generateGoogleText({
modelId = DEFAULT_MODEL, modelId = DEFAULT_MODEL,
temperature = DEFAULT_TEMPERATURE, temperature = DEFAULT_TEMPERATURE,
messages, messages,
maxTokens, maxTokens // Note: Vercel SDK might handle this differently, needs verification
baseUrl
}) { }) {
if (!apiKey) { if (!apiKey) {
throw new Error('Google API key is required.'); throw new Error('Google API key is required.');
@@ -48,21 +37,28 @@ async function generateGoogleText({
log('info', `Generating text with Google model: ${modelId}`); log('info', `Generating text with Google model: ${modelId}`);
try { try {
const googleProvider = getClient(apiKey, baseUrl); // const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
const model = googleProvider(modelId); const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
const model = googleProvider(modelId); // Correct model retrieval
// Construct payload suitable for Vercel SDK's generateText
// Note: The exact structure might depend on how messages are passed
const result = await generateText({ const result = await generateText({
model, model, // Pass the model instance
messages, messages, // Pass the messages array directly
temperature, temperature,
maxOutputTokens: maxTokens maxOutputTokens: maxTokens // Map to correct Vercel SDK param if available
}); });
return result.text;
// Assuming result structure provides text directly or within a property
return result.text; // Adjust based on actual SDK response
} catch (error) { } catch (error) {
log( log(
'error', 'error',
`Error generating text with Google (${modelId}): ${error.message}` `Error generating text with Google (${modelId}): ${error.message}`
); );
throw error; throw error; // Re-throw for unified service handler
} }
} }
@@ -83,8 +79,7 @@ async function streamGoogleText({
modelId = DEFAULT_MODEL, modelId = DEFAULT_MODEL,
temperature = DEFAULT_TEMPERATURE, temperature = DEFAULT_TEMPERATURE,
messages, messages,
maxTokens, maxTokens
baseUrl
}) { }) {
if (!apiKey) { if (!apiKey) {
throw new Error('Google API key is required.'); throw new Error('Google API key is required.');
@@ -92,15 +87,19 @@ async function streamGoogleText({
log('info', `Streaming text with Google model: ${modelId}`); log('info', `Streaming text with Google model: ${modelId}`);
try { try {
const googleProvider = getClient(apiKey, baseUrl); // const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
const model = googleProvider(modelId); const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
const model = googleProvider(modelId); // Correct model retrieval
const stream = await streamText({ const stream = await streamText({
model, model, // Pass the model instance
messages, messages,
temperature, temperature,
maxOutputTokens: maxTokens maxOutputTokens: maxTokens
}); });
return stream;
return stream; // Return the stream directly
} catch (error) { } catch (error) {
log( log(
'error', 'error',
@@ -131,8 +130,7 @@ async function generateGoogleObject({
messages, messages,
schema, schema,
objectName, // Note: Vercel SDK might use this differently or not at all objectName, // Note: Vercel SDK might use this differently or not at all
maxTokens, maxTokens
baseUrl
}) { }) {
if (!apiKey) { if (!apiKey) {
throw new Error('Google API key is required.'); throw new Error('Google API key is required.');
@@ -140,16 +138,23 @@ async function generateGoogleObject({
log('info', `Generating object with Google model: ${modelId}`); log('info', `Generating object with Google model: ${modelId}`);
try { try {
const googleProvider = getClient(apiKey, baseUrl); // const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
const model = googleProvider(modelId); const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
const model = googleProvider(modelId); // Correct model retrieval
const { object } = await generateObject({ const { object } = await generateObject({
model, model, // Pass the model instance
schema, schema,
messages, messages,
temperature, temperature,
maxOutputTokens: maxTokens maxOutputTokens: maxTokens
// Note: 'objectName' or 'mode' might not be directly applicable here
// depending on how `@ai-sdk/google` handles `generateObject`.
// Check SDK docs if specific tool calling/JSON mode needs explicit setup.
}); });
return object;
return object; // Return the parsed object
} catch (error) { } catch (error) {
log( log(
'error', 'error',
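Editor's note: the getClient variant shown earlier in this file's diff (and its siblings in the other provider modules) uses a conditional spread so that baseURL is only set when a base URL is actually supplied. A standalone sketch of that idiom with placeholder values:

// Sketch only: spreading `baseUrl && { baseURL: baseUrl }` is a no-op when baseUrl is undefined.
function buildProviderOptions(apiKey, baseUrl) {
  return {
    apiKey,
    ...(baseUrl && { baseURL: baseUrl })
  };
}

console.log(buildProviderOptions('key'));                        // { apiKey: 'key' }
console.log(buildProviderOptions('key', 'https://proxy.local')); // { apiKey: 'key', baseURL: 'https://proxy.local' }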

View File

@@ -1,26 +1,16 @@
import { createOpenAI } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK import { createOpenAI, openai } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
import { generateObject } from 'ai'; // Import necessary functions from 'ai' import { generateText, streamText, generateObject } from 'ai'; // Import necessary functions from 'ai'
import { log } from '../../scripts/modules/utils.js'; import { log } from '../../scripts/modules/utils.js';
function getClient(apiKey, baseUrl) {
if (!apiKey) {
throw new Error('OpenAI API key is required.');
}
return createOpenAI({
apiKey: apiKey,
...(baseUrl && { baseURL: baseUrl })
});
}
/** /**
* Generates text using OpenAI models via Vercel AI SDK. * Generates text using OpenAI models via Vercel AI SDK.
* *
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl. * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
* @returns {Promise<string>} The generated text content. * @returns {Promise<string>} The generated text content.
* @throws {Error} If API call fails. * @throws {Error} If API call fails.
*/ */
export async function generateOpenAIText(params) { export async function generateOpenAIText(params) {
const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params; const { apiKey, modelId, messages, maxTokens, temperature } = params;
log('debug', `generateOpenAIText called with model: ${modelId}`); log('debug', `generateOpenAIText called with model: ${modelId}`);
if (!apiKey) { if (!apiKey) {
@@ -33,15 +23,18 @@ export async function generateOpenAIText(params) {
throw new Error('Invalid or empty messages array provided for OpenAI.'); throw new Error('Invalid or empty messages array provided for OpenAI.');
} }
const openaiClient = getClient(apiKey, baseUrl); const openaiClient = createOpenAI({ apiKey });
try { try {
const result = await openaiClient.chat(messages, { const result = await openaiClient.chat(messages, {
// Updated: Use openaiClient.chat directly
model: modelId, model: modelId,
max_tokens: maxTokens, max_tokens: maxTokens,
temperature temperature
}); });
// Adjust based on actual Vercel SDK response structure for openaiClient.chat
// This might need refinement based on testing the SDK's output.
const textContent = result?.choices?.[0]?.message?.content?.trim(); const textContent = result?.choices?.[0]?.message?.content?.trim();
if (!textContent) { if (!textContent) {
@@ -72,12 +65,12 @@ export async function generateOpenAIText(params) {
/** /**
* Streams text using OpenAI models via Vercel AI SDK. * Streams text using OpenAI models via Vercel AI SDK.
* *
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl. * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
* @returns {Promise<ReadableStream>} A readable stream of text deltas. * @returns {Promise<ReadableStream>} A readable stream of text deltas.
* @throws {Error} If API call fails. * @throws {Error} If API call fails.
*/ */
export async function streamOpenAIText(params) { export async function streamOpenAIText(params) {
const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params; const { apiKey, modelId, messages, maxTokens, temperature } = params;
log('debug', `streamOpenAIText called with model: ${modelId}`); log('debug', `streamOpenAIText called with model: ${modelId}`);
if (!apiKey) { if (!apiKey) {
@@ -92,10 +85,12 @@ export async function streamOpenAIText(params) {
); );
} }
const openaiClient = getClient(apiKey, baseUrl); const openaiClient = createOpenAI({ apiKey });
try { try {
// Use the streamText function from Vercel AI SDK core
const stream = await openaiClient.chat.stream(messages, { const stream = await openaiClient.chat.stream(messages, {
// Updated: Use openaiClient.chat.stream
model: modelId, model: modelId,
max_tokens: maxTokens, max_tokens: maxTokens,
temperature temperature
@@ -105,6 +100,7 @@ export async function streamOpenAIText(params) {
'debug', 'debug',
`OpenAI streamText initiated successfully for model: ${modelId}` `OpenAI streamText initiated successfully for model: ${modelId}`
); );
// The Vercel SDK's streamText should directly return the stream object
return stream; return stream;
} catch (error) { } catch (error) {
log( log(
@@ -121,7 +117,7 @@ export async function streamOpenAIText(params) {
/** /**
* Generates structured objects using OpenAI models via Vercel AI SDK. * Generates structured objects using OpenAI models via Vercel AI SDK.
* *
* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature, baseUrl. * @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
* @returns {Promise<object>} The generated object matching the schema. * @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If API call fails or object generation fails. * @throws {Error} If API call fails or object generation fails.
*/ */
@@ -133,8 +129,7 @@ export async function generateOpenAIObject(params) {
schema, schema,
objectName, objectName,
maxTokens, maxTokens,
temperature, temperature
baseUrl
} = params; } = params;
log( log(
'debug', 'debug',
@@ -150,9 +145,10 @@ export async function generateOpenAIObject(params) {
if (!objectName) if (!objectName)
throw new Error('Object name is required for OpenAI object generation.'); throw new Error('Object name is required for OpenAI object generation.');
const openaiClient = getClient(apiKey, baseUrl); const openaiClient = createOpenAI({ apiKey });
try { try {
// Use the imported generateObject function from 'ai' package
const result = await generateObject({ const result = await generateObject({
model: openaiClient(modelId), model: openaiClient(modelId),
schema: schema, schema: schema,
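Editor's note: a hedged sketch of driving the object helper in this file with a Zod schema, which the generateObject call from the 'ai' package accepts. The import paths, schema, and model id are assumptions.

// Sketch only: the schema and model id are illustrative placeholders.
import { z } from 'zod';
import { generateOpenAIObject } from './src/ai-providers/openai.js';

const subtaskSchema = z.object({
  title: z.string(),
  priority: z.enum(['high', 'medium', 'low'])
});

const subtask = await generateOpenAIObject({
  apiKey: process.env.OPENAI_API_KEY,
  modelId: 'gpt-4o',
  messages: [{ role: 'user', content: 'Propose one subtask for setting up CI.' }],
  schema: subtaskSchema,
  objectName: 'subtask',
  maxTokens: 1000,
  temperature: 0.2
});
console.log(subtask);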

View File

@@ -2,14 +2,6 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
import { generateText, streamText, generateObject } from 'ai'; import { generateText, streamText, generateObject } from 'ai';
import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in scripts/modules import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in scripts/modules
function getClient(apiKey, baseUrl) {
if (!apiKey) throw new Error('OpenRouter API key is required.');
return createOpenRouter({
apiKey,
...(baseUrl && { baseURL: baseUrl })
});
}
/** /**
* Generates text using an OpenRouter chat model. * Generates text using an OpenRouter chat model.
* *
@@ -19,7 +11,6 @@ function getClient(apiKey, baseUrl) {
* @param {Array<object>} params.messages - Array of message objects (system, user, assistant). * @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
* @param {number} [params.maxTokens] - Maximum tokens to generate. * @param {number} [params.maxTokens] - Maximum tokens to generate.
* @param {number} [params.temperature] - Sampling temperature. * @param {number} [params.temperature] - Sampling temperature.
* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
* @returns {Promise<string>} The generated text content. * @returns {Promise<string>} The generated text content.
* @throws {Error} If the API call fails. * @throws {Error} If the API call fails.
*/ */
@@ -29,7 +20,6 @@ async function generateOpenRouterText({
messages, messages,
maxTokens, maxTokens,
temperature, temperature,
baseUrl,
...rest // Capture any other Vercel AI SDK compatible parameters ...rest // Capture any other Vercel AI SDK compatible parameters
}) { }) {
if (!apiKey) throw new Error('OpenRouter API key is required.'); if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -38,7 +28,7 @@ async function generateOpenRouterText({
throw new Error('Messages array cannot be empty.'); throw new Error('Messages array cannot be empty.');
try { try {
const openrouter = getClient(apiKey, baseUrl); const openrouter = createOpenRouter({ apiKey });
const model = openrouter.chat(modelId); // Assuming chat model const model = openrouter.chat(modelId); // Assuming chat model
const { text } = await generateText({ const { text } = await generateText({
@@ -68,7 +58,6 @@ async function generateOpenRouterText({
* @param {Array<object>} params.messages - Array of message objects (system, user, assistant). * @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
* @param {number} [params.maxTokens] - Maximum tokens to generate. * @param {number} [params.maxTokens] - Maximum tokens to generate.
* @param {number} [params.temperature] - Sampling temperature. * @param {number} [params.temperature] - Sampling temperature.
* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
* @returns {Promise<ReadableStream<string>>} A readable stream of text deltas. * @returns {Promise<ReadableStream<string>>} A readable stream of text deltas.
* @throws {Error} If the API call fails. * @throws {Error} If the API call fails.
*/ */
@@ -78,7 +67,6 @@ async function streamOpenRouterText({
messages, messages,
maxTokens, maxTokens,
temperature, temperature,
baseUrl,
...rest ...rest
}) { }) {
if (!apiKey) throw new Error('OpenRouter API key is required.'); if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -87,7 +75,7 @@ async function streamOpenRouterText({
throw new Error('Messages array cannot be empty.'); throw new Error('Messages array cannot be empty.');
try { try {
const openrouter = getClient(apiKey, baseUrl); const openrouter = createOpenRouter({ apiKey });
const model = openrouter.chat(modelId); const model = openrouter.chat(modelId);
// Directly return the stream from the Vercel AI SDK function // Directly return the stream from the Vercel AI SDK function
@@ -120,7 +108,6 @@ async function streamOpenRouterText({
* @param {number} [params.maxRetries=3] - Max retries for object generation. * @param {number} [params.maxRetries=3] - Max retries for object generation.
* @param {number} [params.maxTokens] - Maximum tokens. * @param {number} [params.maxTokens] - Maximum tokens.
* @param {number} [params.temperature] - Temperature. * @param {number} [params.temperature] - Temperature.
* @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
* @returns {Promise<object>} The generated object matching the schema. * @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If the API call fails or validation fails. * @throws {Error} If the API call fails or validation fails.
*/ */
@@ -133,7 +120,6 @@ async function generateOpenRouterObject({
maxRetries = 3, maxRetries = 3,
maxTokens, maxTokens,
temperature, temperature,
baseUrl,
...rest ...rest
}) { }) {
if (!apiKey) throw new Error('OpenRouter API key is required.'); if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -143,7 +129,7 @@ async function generateOpenRouterObject({
throw new Error('Messages array cannot be empty.'); throw new Error('Messages array cannot be empty.');
try { try {
const openrouter = getClient(apiKey, baseUrl); const openrouter = createOpenRouter({ apiKey });
const model = openrouter.chat(modelId); const model = openrouter.chat(modelId);
const { object } = await generateObject({ const { object } = await generateObject({
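Editor's note: a consumption sketch for the streaming helper in this file. It is assumed to pass through the Vercel AI SDK stream result, whose textStream async iterable can be read in a loop; the import path and model slug are placeholders.

// Sketch only: reads the textStream exposed by the Vercel AI SDK stream result.
import { streamOpenRouterText } from './src/ai-providers/openrouter.js';

const stream = await streamOpenRouterText({
  apiKey: process.env.OPENROUTER_API_KEY,
  modelId: 'anthropic/claude-3.7-sonnet',
  messages: [{ role: 'user', content: 'Draft a short task description.' }],
  maxTokens: 500,
  temperature: 0.2
});

for await (const delta of stream.textStream) {
  process.stdout.write(delta);
}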

View File

@@ -10,13 +10,13 @@ import { log } from '../../scripts/modules/utils.js';
// --- Client Instantiation --- // --- Client Instantiation ---
// Similar to Anthropic, this expects the resolved API key to be passed in. // Similar to Anthropic, this expects the resolved API key to be passed in.
function getClient(apiKey, baseUrl) { function getClient(apiKey) {
if (!apiKey) { if (!apiKey) {
throw new Error('Perplexity API key is required.'); throw new Error('Perplexity API key is required.');
} }
// Create and return a new instance directly
return createPerplexity({ return createPerplexity({
apiKey: apiKey, apiKey: apiKey
...(baseUrl && { baseURL: baseUrl })
}); });
} }
@@ -31,7 +31,6 @@ function getClient(apiKey, baseUrl) {
* @param {Array<object>} params.messages - The messages array. * @param {Array<object>} params.messages - The messages array.
* @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.temperature] - Temperature for generation.
* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
* @returns {Promise<string>} The generated text content. * @returns {Promise<string>} The generated text content.
* @throws {Error} If the API call fails. * @throws {Error} If the API call fails.
*/ */
@@ -40,12 +39,11 @@ export async function generatePerplexityText({
modelId, modelId,
messages, messages,
maxTokens, maxTokens,
temperature, temperature
baseUrl
}) { }) {
log('debug', `Generating Perplexity text with model: ${modelId}`); log('debug', `Generating Perplexity text with model: ${modelId}`);
try { try {
const client = getClient(apiKey, baseUrl); const client = getClient(apiKey);
const result = await generateText({ const result = await generateText({
model: client(modelId), model: client(modelId),
messages: messages, messages: messages,
@@ -72,7 +70,6 @@ export async function generatePerplexityText({
* @param {Array<object>} params.messages - The messages array. * @param {Array<object>} params.messages - The messages array.
* @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.temperature] - Temperature for generation.
* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
* @returns {Promise<object>} The full stream result object from the Vercel AI SDK. * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
* @throws {Error} If the API call fails to initiate the stream. * @throws {Error} If the API call fails to initiate the stream.
*/ */
@@ -81,12 +78,11 @@ export async function streamPerplexityText({
modelId, modelId,
messages, messages,
maxTokens, maxTokens,
temperature, temperature
baseUrl
}) { }) {
log('debug', `Streaming Perplexity text with model: ${modelId}`); log('debug', `Streaming Perplexity text with model: ${modelId}`);
try { try {
const client = getClient(apiKey, baseUrl); const client = getClient(apiKey);
const stream = await streamText({ const stream = await streamText({
model: client(modelId), model: client(modelId),
messages: messages, messages: messages,
@@ -116,7 +112,6 @@ export async function streamPerplexityText({
* @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.temperature] - Temperature for generation.
* @param {number} [params.maxRetries] - Max retries for validation/generation. * @param {number} [params.maxRetries] - Max retries for validation/generation.
* @param {string} [params.baseUrl] - Base URL for the Perplexity API.
* @returns {Promise<object>} The generated object matching the schema. * @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If generation or validation fails or is unsupported. * @throws {Error} If generation or validation fails or is unsupported.
*/ */
@@ -128,8 +123,7 @@ export async function generatePerplexityObject({
objectName = 'generated_object', objectName = 'generated_object',
maxTokens, maxTokens,
temperature, temperature,
maxRetries = 1, maxRetries = 1 // Lower retries as support might be limited
baseUrl
}) { }) {
log( log(
'debug', 'debug',
@@ -140,7 +134,8 @@ export async function generatePerplexityObject({
'generateObject support for Perplexity might be limited or experimental.' 'generateObject support for Perplexity might be limited or experimental.'
); );
try { try {
const client = getClient(apiKey, baseUrl); const client = getClient(apiKey);
// Attempt using generateObject, but be prepared for potential issues
const result = await generateObject({ const result = await generateObject({
model: client(modelId), model: client(modelId),
schema: schema, schema: schema,
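Editor's note: since the object helper in this file warns that structured output support may be limited for this provider, a defensive sketch that falls back to plain text generation. The wrapper name and import path are hypothetical.

// Sketch only: degrade gracefully to unstructured text if generateObject is rejected.
import {
  generatePerplexityObject,
  generatePerplexityText
} from './src/ai-providers/perplexity.js';

async function researchWithFallback(params) {
  try {
    return await generatePerplexityObject(params);
  } catch (error) {
    console.warn(`Structured output failed (${error.message}); falling back to text.`);
    return generatePerplexityText(params);
  }
}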

View File

@@ -9,13 +9,14 @@ import { generateText, streamText, generateObject } from 'ai'; // Only import wh
import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
// --- Client Instantiation --- // --- Client Instantiation ---
function getClient(apiKey, baseUrl) { function getClient(apiKey) {
if (!apiKey) { if (!apiKey) {
throw new Error('xAI API key is required.'); throw new Error('xAI API key is required.');
} }
// Create and return a new instance directly
return createXai({ return createXai({
apiKey: apiKey, apiKey: apiKey
...(baseUrl && { baseURL: baseUrl }) // Add baseURL or other options if needed later
}); });
} }
@@ -30,7 +31,6 @@ function getClient(apiKey, baseUrl) {
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]). * @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
* @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.temperature] - Temperature for generation.
* @param {string} [params.baseUrl] - The base URL for the xAI API.
* @returns {Promise<string>} The generated text content. * @returns {Promise<string>} The generated text content.
* @throws {Error} If the API call fails. * @throws {Error} If the API call fails.
*/ */
@@ -39,14 +39,13 @@ export async function generateXaiText({
modelId, modelId,
messages, messages,
maxTokens, maxTokens,
temperature, temperature
baseUrl
}) { }) {
log('debug', `Generating xAI text with model: ${modelId}`); log('debug', `Generating xAI text with model: ${modelId}`);
try { try {
const client = getClient(apiKey, baseUrl); const client = getClient(apiKey);
const result = await generateText({ const result = await generateText({
model: client(modelId), model: client(modelId), // Correct model invocation
messages: messages, messages: messages,
maxTokens: maxTokens, maxTokens: maxTokens,
temperature: temperature temperature: temperature
@@ -71,7 +70,6 @@ export async function generateXaiText({
* @param {Array<object>} params.messages - The messages array. * @param {Array<object>} params.messages - The messages array.
* @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.temperature] - Temperature for generation.
* @param {string} [params.baseUrl] - The base URL for the xAI API.
* @returns {Promise<object>} The full stream result object from the Vercel AI SDK. * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
* @throws {Error} If the API call fails to initiate the stream. * @throws {Error} If the API call fails to initiate the stream.
*/ */
@@ -80,19 +78,18 @@ export async function streamXaiText({
modelId, modelId,
messages, messages,
maxTokens, maxTokens,
temperature, temperature
baseUrl
}) { }) {
log('debug', `Streaming xAI text with model: ${modelId}`); log('debug', `Streaming xAI text with model: ${modelId}`);
try { try {
const client = getClient(apiKey, baseUrl); const client = getClient(apiKey);
const stream = await streamText({ const stream = await streamText({
model: client(modelId), model: client(modelId), // Correct model invocation
messages: messages, messages: messages,
maxTokens: maxTokens, maxTokens: maxTokens,
temperature: temperature temperature: temperature
}); });
return stream; return stream; // Return the full stream object
} catch (error) { } catch (error) {
log('error', `xAI streamText failed: ${error.message}`, error.stack); log('error', `xAI streamText failed: ${error.message}`, error.stack);
throw error; throw error;
@@ -113,7 +110,6 @@ export async function streamXaiText({
* @param {number} [params.maxTokens] - Maximum tokens for the response. * @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation. * @param {number} [params.temperature] - Temperature for generation.
* @param {number} [params.maxRetries] - Max retries for validation/generation. * @param {number} [params.maxRetries] - Max retries for validation/generation.
* @param {string} [params.baseUrl] - The base URL for the xAI API.
* @returns {Promise<object>} The generated object matching the schema. * @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If generation or validation fails. * @throws {Error} If generation or validation fails.
*/ */
@@ -125,17 +121,16 @@ export async function generateXaiObject({
objectName = 'generated_xai_object', objectName = 'generated_xai_object',
maxTokens, maxTokens,
temperature, temperature,
maxRetries = 3, maxRetries = 3
baseUrl
}) { }) {
log( log(
'warn', 'warn', // Log warning as this is likely unsupported
`Attempting to generate xAI object ('${objectName}') with model: ${modelId}. This may not be supported by the provider.` `Attempting to generate xAI object ('${objectName}') with model: ${modelId}. This may not be supported by the provider.`
); );
try { try {
const client = getClient(apiKey, baseUrl); const client = getClient(apiKey);
const result = await generateObject({ const result = await generateObject({
model: client(modelId), model: client(modelId), // Correct model invocation
// Note: mode might need adjustment if xAI ever supports object generation differently // Note: mode might need adjustment if xAI ever supports object generation differently
mode: 'tool', mode: 'tool',
schema: schema, schema: schema,
@@ -158,6 +153,6 @@ export async function generateXaiObject({
'error', 'error',
`xAI generateObject ('${objectName}') failed: ${error.message}. (Likely unsupported by provider)` `xAI generateObject ('${objectName}') failed: ${error.message}. (Likely unsupported by provider)`
); );
throw error; throw error; // Re-throw the error
} }
} }
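Editor's note: for context, a hedged sketch of how wrappers like generateXaiText and generateAnthropicText might be selected by provider name in a unified service layer. The dispatch map itself is an assumption; only the function names come from this diff.

// Sketch only: the provider-dispatch map is hypothetical.
import { generateAnthropicText } from './src/ai-providers/anthropic.js';
import { generateXaiText } from './src/ai-providers/xai.js';

const textGenerators = {
  anthropic: generateAnthropicText,
  xai: generateXaiText
};

async function generateTextForProvider(provider, params) {
  const generate = textGenerators[provider];
  if (!generate) {
    throw new Error(`Unsupported provider: ${provider}`);
  }
  return generate(params);
}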

View File

@@ -1,32 +0,0 @@
/**
* @typedef {'pending' | 'done' | 'in-progress' | 'review' | 'deferred' | 'cancelled'} TaskStatus
*/
/**
* Task status options list
* @type {TaskStatus[]}
* @description Defines possible task statuses:
* - pending: Task waiting to start
* - done: Task completed
* - in-progress: Task in progress
* - review: Task completed and waiting for review
* - deferred: Task postponed or paused
* - cancelled: Task cancelled and will not be completed
*/
export const TASK_STATUS_OPTIONS = [
'pending',
'done',
'in-progress',
'review',
'deferred',
'cancelled'
];
/**
* Check if a given status is a valid task status
* @param {string} status - The status to check
* @returns {boolean} True if the status is valid, false otherwise
*/
export function isValidTaskStatus(status) {
return TASK_STATUS_OPTIONS.includes(status);
}
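Editor's note: the constants module shown above pairs with the status-validation test near the end of this diff. A small sketch of the guard they describe; the helper name here is hypothetical.

// Sketch only: status values are lower-case, so 'Done' is rejected.
import {
  isValidTaskStatus,
  TASK_STATUS_OPTIONS
} from './src/constants/task-status.js';

function assertValidStatus(status) {
  if (!isValidTaskStatus(status)) {
    throw new Error(
      `Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
    );
  }
}

assertValidStatus('done');    // passes
// assertValidStatus('Done'); // would throw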

View File

@@ -3,8 +3,9 @@
*/ */
import { jest } from '@jest/globals'; import { jest } from '@jest/globals';
import path, { dirname } from 'path'; import path from 'path';
import { fileURLToPath } from 'url'; import { fileURLToPath } from 'url';
import { dirname } from 'path';
// Get the current module's directory // Get the current module's directory
const __filename = fileURLToPath(import.meta.url); const __filename = fileURLToPath(import.meta.url);
@@ -26,7 +27,6 @@ const mockReadJSON = jest.fn();
const mockWriteJSON = jest.fn(); const mockWriteJSON = jest.fn();
const mockEnableSilentMode = jest.fn(); const mockEnableSilentMode = jest.fn();
const mockDisableSilentMode = jest.fn(); const mockDisableSilentMode = jest.fn();
const mockReadComplexityReport = jest.fn().mockReturnValue(null);
const mockGetAnthropicClient = jest.fn().mockReturnValue({}); const mockGetAnthropicClient = jest.fn().mockReturnValue({});
const mockGetConfiguredAnthropicClient = jest.fn().mockReturnValue({}); const mockGetConfiguredAnthropicClient = jest.fn().mockReturnValue({});
@@ -130,7 +130,6 @@ jest.mock('../../../scripts/modules/utils.js', () => ({
writeJSON: mockWriteJSON, writeJSON: mockWriteJSON,
enableSilentMode: mockEnableSilentMode, enableSilentMode: mockEnableSilentMode,
disableSilentMode: mockDisableSilentMode, disableSilentMode: mockDisableSilentMode,
readComplexityReport: mockReadComplexityReport,
CONFIG: { CONFIG: {
model: 'claude-3-7-sonnet-20250219', model: 'claude-3-7-sonnet-20250219',
maxTokens: 64000, maxTokens: 64000,
@@ -161,6 +160,15 @@ jest.mock('../../../scripts/modules/task-manager.js', () => ({
})); }));
// Import dependencies after mocks are set up // Import dependencies after mocks are set up
import fs from 'fs';
import {
readJSON,
writeJSON,
enableSilentMode,
disableSilentMode
} from '../../../scripts/modules/utils.js';
import { expandTask } from '../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../../../mcp-server/src/core/utils/path-utils.js';
import { sampleTasks } from '../../fixtures/sample-tasks.js'; import { sampleTasks } from '../../fixtures/sample-tasks.js';
// Mock logger // Mock logger
@@ -212,37 +220,6 @@ describe('MCP Server Direct Functions', () => {
}); });
describe('listTasksDirect', () => { describe('listTasksDirect', () => {
// Sample complexity report for testing
const mockComplexityReport = {
meta: {
generatedAt: '2025-03-24T20:01:35.986Z',
tasksAnalyzed: 3,
thresholdScore: 5,
projectName: 'Test Project',
usedResearch: false
},
complexityAnalysis: [
{
taskId: 1,
taskTitle: 'Initialize Project',
complexityScore: 3,
recommendedSubtasks: 2
},
{
taskId: 2,
taskTitle: 'Create Core Functionality',
complexityScore: 8,
recommendedSubtasks: 5
},
{
taskId: 3,
taskTitle: 'Implement UI Components',
complexityScore: 6,
recommendedSubtasks: 4
}
]
};
// Test wrapper function that doesn't rely on the actual implementation // Test wrapper function that doesn't rely on the actual implementation
async function testListTasks(args, mockLogger) { async function testListTasks(args, mockLogger) {
// File not found case // File not found case
@@ -258,35 +235,21 @@ describe('MCP Server Direct Functions', () => {
}; };
} }
// Check for complexity report
const complexityReport = mockReadComplexityReport();
let tasksData = [...sampleTasks.tasks];
// Add complexity scores if report exists
if (complexityReport && complexityReport.complexityAnalysis) {
tasksData = tasksData.map((task) => {
const analysis = complexityReport.complexityAnalysis.find(
(a) => a.taskId === task.id
);
if (analysis) {
return { ...task, complexityScore: analysis.complexityScore };
}
return task;
});
}
// Success case // Success case
if (!args.status && !args.withSubtasks) { if (!args.status && !args.withSubtasks) {
return { return {
success: true, success: true,
data: { data: {
tasks: tasksData, tasks: sampleTasks.tasks,
stats: { stats: {
total: tasksData.length, total: sampleTasks.tasks.length,
completed: tasksData.filter((t) => t.status === 'done').length, completed: sampleTasks.tasks.filter((t) => t.status === 'done')
inProgress: tasksData.filter((t) => t.status === 'in-progress')
.length, .length,
pending: tasksData.filter((t) => t.status === 'pending').length inProgress: sampleTasks.tasks.filter(
(t) => t.status === 'in-progress'
).length,
pending: sampleTasks.tasks.filter((t) => t.status === 'pending')
.length
} }
}, },
fromCache: false fromCache: false
@@ -295,14 +258,16 @@ describe('MCP Server Direct Functions', () => {
// Status filter case // Status filter case
if (args.status) { if (args.status) {
const filteredTasks = tasksData.filter((t) => t.status === args.status); const filteredTasks = sampleTasks.tasks.filter(
(t) => t.status === args.status
);
return { return {
success: true, success: true,
data: { data: {
tasks: filteredTasks, tasks: filteredTasks,
filter: args.status, filter: args.status,
stats: { stats: {
total: tasksData.length, total: sampleTasks.tasks.length,
filtered: filteredTasks.length filtered: filteredTasks.length
} }
}, },
@@ -315,10 +280,10 @@ describe('MCP Server Direct Functions', () => {
return { return {
success: true, success: true,
data: { data: {
tasks: tasksData, tasks: sampleTasks.tasks,
includeSubtasks: true, includeSubtasks: true,
stats: { stats: {
total: tasksData.length total: sampleTasks.tasks.length
} }
}, },
fromCache: false fromCache: false
@@ -405,29 +370,6 @@ describe('MCP Server Direct Functions', () => {
expect(result.error.code).toBe('FILE_NOT_FOUND_ERROR'); expect(result.error.code).toBe('FILE_NOT_FOUND_ERROR');
expect(mockLogger.error).toHaveBeenCalled(); expect(mockLogger.error).toHaveBeenCalled();
}); });
test('should include complexity scores when complexity report exists', async () => {
// Arrange
mockReadComplexityReport.mockReturnValueOnce(mockComplexityReport);
const args = {
projectRoot: testProjectRoot,
file: testTasksPath,
withSubtasks: true
};
// Act
const result = await testListTasks(args, mockLogger);
// Assert
expect(result.success).toBe(true);
// Check that tasks have complexity scores from the report
mockComplexityReport.complexityAnalysis.forEach((analysis) => {
const task = result.data.tasks.find((t) => t.id === analysis.taskId);
if (task) {
expect(task.complexityScore).toBe(analysis.complexityScore);
}
});
});
}); });
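Editor's note: the testListTasks wrapper above rebuilds the same stats object in each branch. A small helper sketch of that computation; the helper name is hypothetical.

// Sketch only: mirrors the stats shape assembled inside testListTasks.
function computeTaskStats(tasks) {
  return {
    total: tasks.length,
    completed: tasks.filter((t) => t.status === 'done').length,
    inProgress: tasks.filter((t) => t.status === 'in-progress').length,
    pending: tasks.filter((t) => t.status === 'pending').length
  };
}

computeTaskStats([{ status: 'done' }, { status: 'pending' }]);
// => { total: 2, completed: 1, inProgress: 0, pending: 1 }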
describe('expandTaskDirect', () => { describe('expandTaskDirect', () => {

View File

@@ -8,7 +8,6 @@ const mockGetResearchModelId = jest.fn();
const mockGetFallbackProvider = jest.fn(); const mockGetFallbackProvider = jest.fn();
const mockGetFallbackModelId = jest.fn(); const mockGetFallbackModelId = jest.fn();
const mockGetParametersForRole = jest.fn(); const mockGetParametersForRole = jest.fn();
const mockGetBaseUrlForRole = jest.fn();
jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
getMainProvider: mockGetMainProvider, getMainProvider: mockGetMainProvider,
@@ -17,8 +16,7 @@ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
getResearchModelId: mockGetResearchModelId, getResearchModelId: mockGetResearchModelId,
getFallbackProvider: mockGetFallbackProvider, getFallbackProvider: mockGetFallbackProvider,
getFallbackModelId: mockGetFallbackModelId, getFallbackModelId: mockGetFallbackModelId,
getParametersForRole: mockGetParametersForRole, getParametersForRole: mockGetParametersForRole
getBaseUrlForRole: mockGetBaseUrlForRole
})); }));
// Mock AI Provider Modules // Mock AI Provider Modules
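Editor's note: a hedged sketch of priming these config-manager mocks inside a test body. The concrete return values are assumptions, loosely mirroring the mocked CONFIG elsewhere in this diff.

// Sketch only: return values are illustrative.
mockGetMainProvider.mockReturnValue('anthropic');
mockGetParametersForRole.mockReturnValue({ maxTokens: 64000, temperature: 0.2 });
mockGetBaseUrlForRole.mockReturnValue(undefined); // only present on the baseUrl-aware side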

View File

@@ -2,9 +2,8 @@
* Task finder tests * Task finder tests
*/ */
// Import after mocks are set up - No mocks needed for readComplexityReport anymore
import { findTaskById } from '../../scripts/modules/utils.js'; import { findTaskById } from '../../scripts/modules/utils.js';
import { emptySampleTasks, sampleTasks } from '../fixtures/sample-tasks.js'; import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';
describe('Task Finder', () => { describe('Task Finder', () => {
describe('findTaskById function', () => { describe('findTaskById function', () => {
@@ -56,62 +55,5 @@ describe('Task Finder', () => {
expect(result.task).toBeNull(); expect(result.task).toBeNull();
expect(result.originalSubtaskCount).toBeNull(); expect(result.originalSubtaskCount).toBeNull();
}); });
test('should work correctly when no complexity report is provided', () => {
// Pass null as the complexity report
const result = findTaskById(sampleTasks.tasks, 2, null);
expect(result.task).toBeDefined();
expect(result.task.id).toBe(2);
expect(result.task.complexityScore).toBeUndefined();
});
test('should work correctly when task has no complexity data in the provided report', () => {
// Define a complexity report that doesn't include task 2
const complexityReport = {
complexityAnalysis: [{ taskId: 999, complexityScore: 5 }]
};
const result = findTaskById(sampleTasks.tasks, 2, complexityReport);
expect(result.task).toBeDefined();
expect(result.task.id).toBe(2);
expect(result.task.complexityScore).toBeUndefined();
});
test('should include complexity score when report is provided', () => {
// Define the complexity report for this test
const complexityReport = {
meta: {
generatedAt: '2023-01-01T00:00:00.000Z',
tasksAnalyzed: 3,
thresholdScore: 5
},
complexityAnalysis: [
{
taskId: 1,
taskTitle: 'Initialize Project',
complexityScore: 3,
recommendedSubtasks: 2
},
{
taskId: 2,
taskTitle: 'Create Core Functionality',
complexityScore: 8,
recommendedSubtasks: 5
},
{
taskId: 3,
taskTitle: 'Implement UI Components',
complexityScore: 6,
recommendedSubtasks: 4
}
]
};
const result = findTaskById(sampleTasks.tasks, 2, complexityReport);
expect(result.task).toBeDefined();
expect(result.task.id).toBe(2);
expect(result.task.complexityScore).toBe(8);
});
}); });
}); });

View File

@@ -199,12 +199,6 @@ const testSetTaskStatus = (tasksData, taskIdInput, newStatus) => {
// Simplified version of updateSingleTaskStatus for testing // Simplified version of updateSingleTaskStatus for testing
const testUpdateSingleTaskStatus = (tasksData, taskIdInput, newStatus) => { const testUpdateSingleTaskStatus = (tasksData, taskIdInput, newStatus) => {
if (!isValidTaskStatus(newStatus)) {
throw new Error(
`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
);
}
// Check if it's a subtask (e.g., "1.2") // Check if it's a subtask (e.g., "1.2")
if (taskIdInput.includes('.')) { if (taskIdInput.includes('.')) {
const [parentId, subtaskId] = taskIdInput const [parentId, subtaskId] = taskIdInput
@@ -335,10 +329,6 @@ const testAddTask = (
import * as taskManager from '../../scripts/modules/task-manager.js'; import * as taskManager from '../../scripts/modules/task-manager.js';
import { sampleClaudeResponse } from '../fixtures/sample-claude-response.js'; import { sampleClaudeResponse } from '../fixtures/sample-claude-response.js';
import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js'; import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';
import {
isValidTaskStatus,
TASK_STATUS_OPTIONS
} from '../../src/constants/task-status.js';
// Destructure the required functions for convenience // Destructure the required functions for convenience
const { findNextTask, generateTaskFiles, clearSubtasks, updateTaskById } = const { findNextTask, generateTaskFiles, clearSubtasks, updateTaskById } =
@@ -1175,16 +1165,6 @@ describe('Task Manager Module', () => {
expect(testTasksData.tasks[1].status).toBe('done'); expect(testTasksData.tasks[1].status).toBe('done');
}); });
test('should throw error for invalid status', async () => {
// Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
// Assert
expect(() =>
testUpdateSingleTaskStatus(testTasksData, '2', 'Done')
).toThrow(/Error: Invalid status value: Done./);
});
test('should update subtask status', async () => { test('should update subtask status', async () => {
// Arrange // Arrange
const testTasksData = JSON.parse(JSON.stringify(sampleTasks)); const testTasksData = JSON.parse(JSON.stringify(sampleTasks));