Compare commits
3 Commits
add-comple...
ThomasMldr

| Author | SHA1 | Date |
|---|---|---|
| | e8f73d1bea | |
| | f9f3a24568 | |
| | b1f3796ec7 | |
@@ -1,5 +0,0 @@
---
'task-master-ai': minor
---

.taskmasterconfig now supports a baseUrl field per model role (main, research, fallback), allowing endpoint overrides for any provider.
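For illustration only, a minimal sketch of how such a per-role override could look in `.taskmasterconfig`, using the role fields that appear later in this compare; the enclosing object structure is assumed and the endpoint values are placeholders, not prescribed values:

```jsonc
// Sketch only: wrapper structure assumed, baseUrl values are placeholder endpoints.
"main": {
  "provider": "anthropic",
  "modelId": "claude-3-7-sonnet-20250219",
  "maxTokens": 64000,
  "temperature": 0.2,
  "baseUrl": "https://api.anthropic.com/v1"   // optional per-role endpoint override
},
"research": {
  "provider": "perplexity",
  "modelId": "sonar-pro",
  "maxTokens": 8700,
  "temperature": 0.1,
  "baseUrl": "https://api.perplexity.ai/v1"
}
```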
@@ -1,5 +0,0 @@
---
'task-master-ai': patch
---

Fix the error handling of task status settings
@@ -1,7 +0,0 @@
---
'task-master-ai': patch
---

Remove caching layer from MCP direct functions for task listing, next task, and complexity report

- Fixes issues users were having where they were getting stale data
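For context, the caching wrapper being removed follows the pattern visible in the file diffs further down: each direct function built a cache key and delegated to `getCachedOrExecute`. A rough sketch of that call shape (names taken from the diff; the body is illustrative, not the exact repo code):

```javascript
// Illustrative sketch of the removed caching pattern, assembled from the hunks below.
import { getCachedOrExecute } from '../../tools/utils.js';

export async function listTasksDirect(args, log) {
  const { tasksJsonPath, status, withSubtasks } = args;
  const cacheKey = `listTasks:${tasksJsonPath}:${status || 'all'}:${withSubtasks || false}`;

  // On a cache miss the action runs and its { success, data } result is stored under cacheKey;
  // on a hit the stored result is returned with fromCache: true.
  const coreListTasksAction = async () => {
    /* ...call listTasks() and shape the { success, data } result... */
  };

  const result = await getCachedOrExecute({ cacheKey, actionFn: coreListTasksAction, log });
  return result; // { success, data/error, fromCache }
}
```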
@@ -1,5 +0,0 @@
---
'task-master-ai': minor
---

Display task complexity scores in task lists, next task, and task details views.
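The complexity display relies on helpers that read the analysis report and copy scores onto task objects before rendering. A simplified sketch of that flow, based on the `readComplexityReport`/`addComplexityToTask` usage visible in the list-tasks hunk below:

```javascript
// Simplified sketch of how the views attach scores (see the list-tasks and utils hunks below).
const complexityReport = readComplexityReport(reportPath); // returns null when no report exists
if (complexityReport && complexityReport.complexityAnalysis) {
  data.tasks.forEach((task) => addComplexityToTask(task, complexityReport));
}
// Each matched task now carries task.complexityScore, which the table rendering
// colors via getComplexityWithColor(task.complexityScore) or shows as 'N/A'.
```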
38 .env.example
@@ -1,9 +1,29 @@
# API Keys (Required for using in any role i.e. main/research/fallback -- see `task-master models`)
ANTHROPIC_API_KEY=YOUR_ANTHROPIC_KEY_HERE
PERPLEXITY_API_KEY=YOUR_PERPLEXITY_KEY_HERE
OPENAI_API_KEY=YOUR_OPENAI_KEY_HERE
GOOGLE_API_KEY=YOUR_GOOGLE_KEY_HERE
MISTRAL_API_KEY=YOUR_MISTRAL_KEY_HERE
OPENROUTER_API_KEY=YOUR_OPENROUTER_KEY_HERE
XAI_API_KEY=YOUR_XAI_KEY_HERE
AZURE_OPENAI_API_KEY=YOUR_AZURE_KEY_HERE
# API Keys (Required)
ANTHROPIC_API_KEY="your_anthropic_api_key_here" # Format: sk-ant-api03-...
PERPLEXITY_API_KEY="your_perplexity_api_key_here" # Format: pplx-...
OPENAI_API_KEY="your_openai_api_key_here" # Format: sk-...
GOOGLE_API_KEY="your_google_api_key_here" # Format: AIza...
MISTRAL_API_KEY="your_mistral_api_key_here" # Format: ...
OPENROUTER_API_KEY="your_openrouter_api_key_here" # Format: sk-or-...
XAI_API_KEY="your_xai_api_key_here" # Format: ...
AZURE_OPENAI_API_KEY="your_azure_key_here" # Format: ...

# API Base URLs (Optional)
ANTHROPIC_API_BASE_URL="optional_base_url_here" # Optional custom base URL for Anthropic API

# Model Configuration
MODEL="claude-3-7-sonnet-20250219" # Recommended models: claude-3-7-sonnet-20250219, claude-3-opus-20240229
PERPLEXITY_MODEL="sonar-pro" # Perplexity model for research-backed subtasks
MAX_TOKENS="64000" # Maximum tokens for model responses
TEMPERATURE="0.2" # Temperature for model responses (0.0-1.0)

# Logging Configuration
DEBUG="false" # Enable debug logging (true/false)
LOG_LEVEL="info" # Log level (debug, info, warn, error)

# Task Generation Settings
DEFAULT_SUBTASKS="5" # Default number of subtasks when expanding
DEFAULT_PRIORITY="medium" # Default priority for generated tasks (high, medium, low)

# Project Metadata (Optional)
PROJECT_NAME="Your Project Name" # Override default project name in tasks.json
@@ -30,6 +30,7 @@ MCP (Model Control Protocol) provides the easiest way to get started with Task M
"args": ["-y", "--package=task-master-ai", "task-master-ai"],
"env": {
"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
"ANTHROPIC_API_BASE_URL": "YOUR_CUSTOM_BASE_URL_HERE (optional)",
"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
"OPENAI_API_KEY": "YOUR_OPENAI_KEY_HERE",
"GOOGLE_API_KEY": "YOUR_GOOGLE_KEY_HERE",
@@ -3784,6 +3784,7 @@ In this tutorial, you'll learn how to build a LLM-powered chatbot client that co
if (!ANTHROPIC_API_KEY) {
throw new Error("ANTHROPIC_API_KEY is not set");
}
const ANTHROPIC_API_BASE_URL = process.env.ANTHROPIC_API_BASE_URL;

class MCPClient {
private mcp: Client;
@@ -3794,6 +3795,7 @@ In this tutorial, you'll learn how to build a LLM-powered chatbot client that co
constructor() {
this.anthropic = new Anthropic({
apiKey: ANTHROPIC_API_KEY,
baseUrl: ANTHROPIC_API_BASE_URL,
});
this.mcp = new Client({ name: "mcp-client-cli", version: "1.0.0" });
}
@@ -15,15 +15,13 @@ Taskmaster uses two primary methods for configuration:
"provider": "anthropic",
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 64000,
"temperature": 0.2,
"baseUrl": "https://api.anthropic.com/v1"
"temperature": 0.2
},
"research": {
"provider": "perplexity",
"modelId": "sonar-pro",
"maxTokens": 8700,
"temperature": 0.1,
"baseUrl": "https://api.perplexity.ai/v1"
"temperature": 0.1
},
"fallback": {
"provider": "anthropic",
@@ -58,9 +56,8 @@ Taskmaster uses two primary methods for configuration:
- `AZURE_OPENAI_API_KEY`: Your Azure OpenAI API key (also requires `AZURE_OPENAI_ENDPOINT`).
- `OPENROUTER_API_KEY`: Your OpenRouter API key.
- `XAI_API_KEY`: Your X-AI API key.
- **Optional Endpoint Overrides:**
- **Per-role `baseUrl` in `.taskmasterconfig`:** You can add a `baseUrl` property to any model role (`main`, `research`, `fallback`) to override the default API endpoint for that provider. If omitted, the provider's standard endpoint is used.
- `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key (can also be set as `baseUrl` for the Azure model role).
- **Optional Endpoint Overrides (in .taskmasterconfig):**
- `AZURE_OPENAI_ENDPOINT`: Required if using Azure OpenAI key.
- `OLLAMA_BASE_URL`: Override the default Ollama API URL (Default: `http://localhost:11434/api`).

**Important:** Settings like model ID selections (`main`, `research`, `fallback`), `maxTokens`, `temperature`, `logLevel`, `defaultSubtasks`, `defaultPriority`, and `projectName` are **managed in `.taskmasterconfig`**, not environment variables.
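To connect this docs change to the code changes below: per the hunks later in this compare, config-manager exposes a `getBaseUrlForRole` getter and the unified AI service passes its result through to the provider client. A condensed sketch of that flow, assembled from those hunks rather than quoted verbatim:

```javascript
// Condensed sketch assembled from the config-manager, ai-services-unified, and anthropic provider hunks below.
function getBaseUrlForRole(role, explicitRoot = null) {
  const roleConfig = getModelConfigForRole(role, explicitRoot);
  // Only honour the override when the role actually defines a string baseUrl.
  return roleConfig && typeof roleConfig.baseUrl === 'string'
    ? roleConfig.baseUrl
    : undefined;
}

// In the unified service runner, the resolved baseUrl rides along with the other call params:
const baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot);
const callParams = { apiKey, modelId, maxTokens: roleParams.maxTokens, temperature: roleParams.temperature, messages, baseUrl };

// The Anthropic provider then only sets baseURL on the SDK client when an override is present:
const client = createAnthropic({ apiKey, ...(baseUrl && { baseURL: baseUrl }) });
```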
@@ -8,6 +8,7 @@ import {
enableSilentMode,
disableSilentMode
} from '../../../../scripts/modules/utils.js';
import { getCachedOrExecute } from '../../tools/utils.js';

/**
 * Direct function wrapper for displaying the complexity report with error handling and caching.
@@ -85,20 +86,30 @@ export async function complexityReportDirect(args, log) {

// Use the caching utility
try {
const result = await coreActionFn();
log.info('complexityReportDirect completed');
return result;
const result = await getCachedOrExecute({
cacheKey,
actionFn: coreActionFn,
log
});
log.info(
`complexityReportDirect completed. From cache: ${result.fromCache}`
);
return result; // Returns { success, data/error, fromCache }
} catch (error) {
// Catch unexpected errors from getCachedOrExecute itself
// Ensure silent mode is disabled
disableSilentMode();

log.error(`Unexpected error during complexityReport: ${error.message}`);
log.error(
`Unexpected error during getCachedOrExecute for complexityReport: ${error.message}`
);
return {
success: false,
error: {
code: 'UNEXPECTED_ERROR',
message: error.message
}
},
fromCache: false
};
}
} catch (error) {
@@ -4,6 +4,7 @@
|
||||
*/
|
||||
|
||||
import { listTasks } from '../../../../scripts/modules/task-manager.js';
|
||||
import { getCachedOrExecute } from '../../tools/utils.js';
|
||||
import {
|
||||
enableSilentMode,
|
||||
disableSilentMode
|
||||
@@ -18,7 +19,7 @@ import {
|
||||
*/
|
||||
export async function listTasksDirect(args, log) {
|
||||
// Destructure the explicit tasksJsonPath from args
|
||||
const { tasksJsonPath, reportPath, status, withSubtasks } = args;
|
||||
const { tasksJsonPath, status, withSubtasks } = args;
|
||||
|
||||
if (!tasksJsonPath) {
|
||||
log.error('listTasksDirect called without tasksJsonPath');
|
||||
@@ -35,6 +36,7 @@ export async function listTasksDirect(args, log) {
|
||||
// Use the explicit tasksJsonPath for cache key
|
||||
const statusFilter = status || 'all';
|
||||
const withSubtasksFilter = withSubtasks || false;
|
||||
const cacheKey = `listTasks:${tasksJsonPath}:${statusFilter}:${withSubtasksFilter}`;
|
||||
|
||||
// Define the action function to be executed on cache miss
|
||||
const coreListTasksAction = async () => {
|
||||
@@ -49,7 +51,6 @@ export async function listTasksDirect(args, log) {
|
||||
const resultData = listTasks(
|
||||
tasksJsonPath,
|
||||
statusFilter,
|
||||
reportPath,
|
||||
withSubtasksFilter,
|
||||
'json'
|
||||
);
|
||||
@@ -64,7 +65,6 @@ export async function listTasksDirect(args, log) {
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
log.info(
|
||||
`Core listTasks function retrieved ${resultData.tasks.length} tasks`
|
||||
);
|
||||
@@ -88,19 +88,25 @@ export async function listTasksDirect(args, log) {
|
||||
}
|
||||
};
|
||||
|
||||
// Use the caching utility
|
||||
try {
|
||||
const result = await coreListTasksAction();
|
||||
log.info('listTasksDirect completed');
|
||||
return result;
|
||||
const result = await getCachedOrExecute({
|
||||
cacheKey,
|
||||
actionFn: coreListTasksAction,
|
||||
log
|
||||
});
|
||||
log.info(`listTasksDirect completed. From cache: ${result.fromCache}`);
|
||||
return result; // Returns { success, data/error, fromCache }
|
||||
} catch (error) {
|
||||
log.error(`Unexpected error during listTasks: ${error.message}`);
|
||||
// Catch unexpected errors from getCachedOrExecute itself (though unlikely)
|
||||
log.error(
|
||||
`Unexpected error during getCachedOrExecute for listTasks: ${error.message}`
|
||||
);
|
||||
console.error(error.stack);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
code: 'UNEXPECTED_ERROR',
|
||||
message: error.message
|
||||
}
|
||||
error: { code: 'CACHE_UTIL_ERROR', message: error.message },
|
||||
fromCache: false
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,10 +4,8 @@
|
||||
*/
|
||||
|
||||
import { findNextTask } from '../../../../scripts/modules/task-manager.js';
|
||||
import {
|
||||
readJSON,
|
||||
readComplexityReport
|
||||
} from '../../../../scripts/modules/utils.js';
|
||||
import { readJSON } from '../../../../scripts/modules/utils.js';
|
||||
import { getCachedOrExecute } from '../../tools/utils.js';
|
||||
import {
|
||||
enableSilentMode,
|
||||
disableSilentMode
|
||||
@@ -23,7 +21,7 @@ import {
|
||||
*/
|
||||
export async function nextTaskDirect(args, log) {
|
||||
// Destructure expected args
|
||||
const { tasksJsonPath, reportPath } = args;
|
||||
const { tasksJsonPath } = args;
|
||||
|
||||
if (!tasksJsonPath) {
|
||||
log.error('nextTaskDirect called without tasksJsonPath');
|
||||
@@ -37,6 +35,9 @@ export async function nextTaskDirect(args, log) {
|
||||
};
|
||||
}
|
||||
|
||||
// Generate cache key using the provided task path
|
||||
const cacheKey = `nextTask:${tasksJsonPath}`;
|
||||
|
||||
// Define the action function to be executed on cache miss
|
||||
const coreNextTaskAction = async () => {
|
||||
try {
|
||||
@@ -58,11 +59,8 @@ export async function nextTaskDirect(args, log) {
|
||||
};
|
||||
}
|
||||
|
||||
// Read the complexity report
|
||||
const complexityReport = readComplexityReport(reportPath);
|
||||
|
||||
// Find the next task
|
||||
const nextTask = findNextTask(data.tasks, complexityReport);
|
||||
const nextTask = findNextTask(data.tasks);
|
||||
|
||||
if (!nextTask) {
|
||||
log.info(
|
||||
@@ -120,11 +118,18 @@ export async function nextTaskDirect(args, log) {
|
||||
|
||||
// Use the caching utility
|
||||
try {
|
||||
const result = await coreNextTaskAction();
|
||||
log.info(`nextTaskDirect completed.`);
|
||||
return result;
|
||||
const result = await getCachedOrExecute({
|
||||
cacheKey,
|
||||
actionFn: coreNextTaskAction,
|
||||
log
|
||||
});
|
||||
log.info(`nextTaskDirect completed. From cache: ${result.fromCache}`);
|
||||
return result; // Returns { success, data/error, fromCache }
|
||||
} catch (error) {
|
||||
log.error(`Unexpected error during nextTask: ${error.message}`);
|
||||
// Catch unexpected errors from getCachedOrExecute itself
|
||||
log.error(
|
||||
`Unexpected error during getCachedOrExecute for nextTask: ${error.message}`
|
||||
);
|
||||
return {
|
||||
success: false,
|
||||
error: {
|
||||
|
||||
@@ -3,10 +3,11 @@
|
||||
* Direct function implementation for showing task details
|
||||
*/
|
||||
|
||||
import { findTaskById, readJSON } from '../../../../scripts/modules/utils.js';
|
||||
import { getCachedOrExecute } from '../../tools/utils.js';
|
||||
import {
|
||||
findTaskById,
|
||||
readComplexityReport,
|
||||
readJSON
|
||||
enableSilentMode,
|
||||
disableSilentMode
|
||||
} from '../../../../scripts/modules/utils.js';
|
||||
import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
|
||||
@@ -16,7 +17,6 @@ import { findTasksJsonPath } from '../utils/path-utils.js';
|
||||
* @param {Object} args - Command arguments.
|
||||
* @param {string} args.id - Task ID to show.
|
||||
* @param {string} [args.file] - Optional path to the tasks file (passed to findTasksJsonPath).
|
||||
* @param {string} args.reportPath - Explicit path to the complexity report file.
|
||||
* @param {string} [args.status] - Optional status to filter subtasks by.
|
||||
* @param {string} args.projectRoot - Absolute path to the project root directory (already normalized by tool).
|
||||
* @param {Object} log - Logger object.
|
||||
@@ -27,7 +27,7 @@ export async function showTaskDirect(args, log) {
|
||||
// Destructure session from context if needed later, otherwise ignore
|
||||
// const { session } = context;
|
||||
// Destructure projectRoot and other args. projectRoot is assumed normalized.
|
||||
const { id, file, reportPath, status, projectRoot } = args;
|
||||
const { id, file, status, projectRoot } = args;
|
||||
|
||||
log.info(
|
||||
`Showing task direct function. ID: ${id}, File: ${file}, Status Filter: ${status}, ProjectRoot: ${projectRoot}`
|
||||
@@ -64,12 +64,9 @@ export async function showTaskDirect(args, log) {
|
||||
};
|
||||
}
|
||||
|
||||
const complexityReport = readComplexityReport(reportPath);
|
||||
|
||||
const { task, originalSubtaskCount } = findTaskById(
|
||||
tasksData.tasks,
|
||||
id,
|
||||
complexityReport,
|
||||
status
|
||||
);
|
||||
|
||||
|
||||
@@ -339,49 +339,6 @@ export function findPRDDocumentPath(projectRoot, explicitPath, log) {
|
||||
return null;
|
||||
}
|
||||
|
||||
export function findComplexityReportPath(projectRoot, explicitPath, log) {
|
||||
// If explicit path is provided, check if it exists
|
||||
if (explicitPath) {
|
||||
const fullPath = path.isAbsolute(explicitPath)
|
||||
? explicitPath
|
||||
: path.resolve(projectRoot, explicitPath);
|
||||
|
||||
if (fs.existsSync(fullPath)) {
|
||||
log.info(`Using provided PRD document path: ${fullPath}`);
|
||||
return fullPath;
|
||||
} else {
|
||||
log.warn(
|
||||
`Provided PRD document path not found: ${fullPath}, will search for alternatives`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Common locations and file patterns for PRD documents
|
||||
const commonLocations = [
|
||||
'', // Project root
|
||||
'scripts/'
|
||||
];
|
||||
|
||||
const commonFileNames = [
|
||||
'complexity-report.json',
|
||||
'task-complexity-report.json'
|
||||
];
|
||||
|
||||
// Check all possible combinations
|
||||
for (const location of commonLocations) {
|
||||
for (const fileName of commonFileNames) {
|
||||
const potentialPath = path.join(projectRoot, location, fileName);
|
||||
if (fs.existsSync(potentialPath)) {
|
||||
log.info(`Found PRD document at: ${potentialPath}`);
|
||||
return potentialPath;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.warn(`No PRD document found in common locations within ${projectRoot}`);
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolves the tasks output directory path
|
||||
* @param {string} projectRoot - The project root directory
|
||||
|
||||
@@ -10,10 +10,7 @@ import {
|
||||
withNormalizedProjectRoot
|
||||
} from './utils.js';
|
||||
import { showTaskDirect } from '../core/task-master-core.js';
|
||||
import {
|
||||
findTasksJsonPath,
|
||||
findComplexityReportPath
|
||||
} from '../core/utils/path-utils.js';
|
||||
import { findTasksJsonPath } from '../core/utils/path-utils.js';
|
||||
|
||||
/**
|
||||
* Custom processor function that removes allTasks from the response
|
||||
@@ -53,12 +50,6 @@ export function registerShowTaskTool(server) {
|
||||
.string()
|
||||
.optional()
|
||||
.describe('Path to the tasks file relative to project root'),
|
||||
complexityReport: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
'Path to the complexity report file (relative to project root or absolute)'
|
||||
),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.optional()
|
||||
@@ -90,22 +81,9 @@ export function registerShowTaskTool(server) {
|
||||
}
|
||||
|
||||
// Call the direct function, passing the normalized projectRoot
|
||||
// Resolve the path to complexity report
|
||||
let complexityReportPath;
|
||||
try {
|
||||
complexityReportPath = findComplexityReportPath(
|
||||
projectRoot,
|
||||
args.complexityReport,
|
||||
log
|
||||
);
|
||||
} catch (error) {
|
||||
log.error(`Error finding complexity report: ${error.message}`);
|
||||
}
|
||||
const result = await showTaskDirect(
|
||||
{
|
||||
tasksJsonPath: tasksJsonPath,
|
||||
reportPath: complexityReportPath,
|
||||
// Pass other relevant args
|
||||
id: id,
|
||||
status: status,
|
||||
projectRoot: projectRoot
|
||||
|
||||
@@ -10,10 +10,7 @@ import {
|
||||
withNormalizedProjectRoot
|
||||
} from './utils.js';
|
||||
import { listTasksDirect } from '../core/task-master-core.js';
|
||||
import {
|
||||
findTasksJsonPath,
|
||||
findComplexityReportPath
|
||||
} from '../core/utils/path-utils.js';
|
||||
import { findTasksJsonPath } from '../core/utils/path-utils.js';
|
||||
|
||||
/**
|
||||
* Register the getTasks tool with the MCP server
|
||||
@@ -41,12 +38,6 @@ export function registerListTasksTool(server) {
|
||||
.describe(
|
||||
'Path to the tasks file (relative to project root or absolute)'
|
||||
),
|
||||
complexityReport: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
'Path to the complexity report file (relative to project root or absolute)'
|
||||
),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.describe('The directory of the project. Must be an absolute path.')
|
||||
@@ -69,23 +60,11 @@ export function registerListTasksTool(server) {
|
||||
);
|
||||
}
|
||||
|
||||
// Resolve the path to complexity report
|
||||
let complexityReportPath;
|
||||
try {
|
||||
complexityReportPath = findComplexityReportPath(
|
||||
args.projectRoot,
|
||||
args.complexityReport,
|
||||
log
|
||||
);
|
||||
} catch (error) {
|
||||
log.error(`Error finding complexity report: ${error.message}`);
|
||||
}
|
||||
const result = await listTasksDirect(
|
||||
{
|
||||
tasksJsonPath: tasksJsonPath,
|
||||
status: args.status,
|
||||
withSubtasks: args.withSubtasks,
|
||||
reportPath: complexityReportPath
|
||||
withSubtasks: args.withSubtasks
|
||||
},
|
||||
log
|
||||
);
|
||||
|
||||
@@ -10,10 +10,7 @@ import {
|
||||
withNormalizedProjectRoot
|
||||
} from './utils.js';
|
||||
import { nextTaskDirect } from '../core/task-master-core.js';
|
||||
import {
|
||||
findTasksJsonPath,
|
||||
findComplexityReportPath
|
||||
} from '../core/utils/path-utils.js';
|
||||
import { findTasksJsonPath } from '../core/utils/path-utils.js';
|
||||
|
||||
/**
|
||||
* Register the next-task tool with the MCP server
|
||||
@@ -26,12 +23,6 @@ export function registerNextTaskTool(server) {
|
||||
'Find the next task to work on based on dependencies and status',
|
||||
parameters: z.object({
|
||||
file: z.string().optional().describe('Absolute path to the tasks file'),
|
||||
complexityReport: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe(
|
||||
'Path to the complexity report file (relative to project root or absolute)'
|
||||
),
|
||||
projectRoot: z
|
||||
.string()
|
||||
.describe('The directory of the project. Must be an absolute path.')
|
||||
@@ -54,21 +45,9 @@ export function registerNextTaskTool(server) {
|
||||
);
|
||||
}
|
||||
|
||||
// Resolve the path to complexity report
|
||||
let complexityReportPath;
|
||||
try {
|
||||
complexityReportPath = findComplexityReportPath(
|
||||
args.projectRoot,
|
||||
args.complexityReport,
|
||||
log
|
||||
);
|
||||
} catch (error) {
|
||||
log.error(`Error finding complexity report: ${error.message}`);
|
||||
}
|
||||
const result = await nextTaskDirect(
|
||||
{
|
||||
tasksJsonPath: tasksJsonPath,
|
||||
reportPath: complexityReportPath
|
||||
tasksJsonPath: tasksJsonPath
|
||||
},
|
||||
log
|
||||
);
|
||||
|
||||
@@ -11,7 +11,6 @@ import {
|
||||
} from './utils.js';
|
||||
import { setTaskStatusDirect } from '../core/task-master-core.js';
|
||||
import { findTasksJsonPath } from '../core/utils/path-utils.js';
|
||||
import { TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js';
|
||||
|
||||
/**
|
||||
* Register the setTaskStatus tool with the MCP server
|
||||
@@ -28,7 +27,7 @@ export function registerSetTaskStatusTool(server) {
|
||||
"Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated to update multiple tasks/subtasks at once."
|
||||
),
|
||||
status: z
|
||||
.enum(TASK_STATUS_OPTIONS)
|
||||
.string()
|
||||
.describe(
|
||||
"New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'."
|
||||
),
|
||||
|
||||
4 package-lock.json (generated)
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "task-master-ai",
|
||||
"version": "0.13.2",
|
||||
"version": "0.12",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "task-master-ai",
|
||||
"version": "0.13.2",
|
||||
"version": "0.12",
|
||||
"license": "MIT WITH Commons-Clause",
|
||||
"dependencies": {
|
||||
"@ai-sdk/anthropic": "^1.2.10",
|
||||
|
||||
@@ -14,8 +14,7 @@ import {
|
||||
getResearchModelId,
|
||||
getFallbackProvider,
|
||||
getFallbackModelId,
|
||||
getParametersForRole,
|
||||
getBaseUrlForRole
|
||||
getParametersForRole
|
||||
} from './config-manager.js';
|
||||
import { log, resolveEnvVariable, findProjectRoot } from './utils.js';
|
||||
|
||||
@@ -285,13 +284,7 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
'AI service call failed for all configured roles.';
|
||||
|
||||
for (const currentRole of sequence) {
|
||||
let providerName,
|
||||
modelId,
|
||||
apiKey,
|
||||
roleParams,
|
||||
providerFnSet,
|
||||
providerApiFn,
|
||||
baseUrl;
|
||||
let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn;
|
||||
|
||||
try {
|
||||
log('info', `New AI service call with role: ${currentRole}`);
|
||||
@@ -332,7 +325,6 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
|
||||
// Pass effectiveProjectRoot to getParametersForRole
|
||||
roleParams = getParametersForRole(currentRole, effectiveProjectRoot);
|
||||
baseUrl = getBaseUrlForRole(currentRole, effectiveProjectRoot);
|
||||
|
||||
// 2. Get Provider Function Set
|
||||
providerFnSet = PROVIDER_FUNCTIONS[providerName?.toLowerCase()];
|
||||
@@ -409,7 +401,6 @@ async function _unifiedServiceRunner(serviceType, params) {
|
||||
maxTokens: roleParams.maxTokens,
|
||||
temperature: roleParams.temperature,
|
||||
messages,
|
||||
baseUrl,
|
||||
...(serviceType === 'generateObject' && { schema, objectName }),
|
||||
...restApiParams
|
||||
};
|
||||
|
||||
@@ -73,10 +73,6 @@ import {
|
||||
getApiKeyStatusReport
|
||||
} from './task-manager/models.js';
|
||||
import { findProjectRoot } from './utils.js';
|
||||
import {
|
||||
isValidTaskStatus,
|
||||
TASK_STATUS_OPTIONS
|
||||
} from '../../src/constants/task-status.js';
|
||||
import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
|
||||
/**
|
||||
* Runs the interactive setup process for model configuration.
|
||||
@@ -1037,7 +1033,7 @@ function registerCommands(programInstance) {
|
||||
)
|
||||
.option(
|
||||
'-s, --status <status>',
|
||||
`New status (one of: ${TASK_STATUS_OPTIONS.join(', ')})`
|
||||
'New status (todo, in-progress, review, done)'
|
||||
)
|
||||
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
|
||||
.action(async (options) => {
|
||||
@@ -1050,16 +1046,6 @@ function registerCommands(programInstance) {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (!isValidTaskStatus(status)) {
|
||||
console.error(
|
||||
chalk.red(
|
||||
`Error: Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
|
||||
)
|
||||
);
|
||||
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log(
|
||||
chalk.blue(`Setting status of task(s) ${taskId} to: ${status}`)
|
||||
);
|
||||
@@ -1072,16 +1058,10 @@ function registerCommands(programInstance) {
|
||||
.command('list')
|
||||
.description('List all tasks')
|
||||
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
|
||||
.option(
|
||||
'-r, --report <report>',
|
||||
'Path to the complexity report file',
|
||||
'scripts/task-complexity-report.json'
|
||||
)
|
||||
.option('-s, --status <status>', 'Filter by status')
|
||||
.option('--with-subtasks', 'Show subtasks for each task')
|
||||
.action(async (options) => {
|
||||
const tasksPath = options.file;
|
||||
const reportPath = options.report;
|
||||
const statusFilter = options.status;
|
||||
const withSubtasks = options.withSubtasks || false;
|
||||
|
||||
@@ -1093,7 +1073,7 @@ function registerCommands(programInstance) {
|
||||
console.log(chalk.blue('Including subtasks in listing'));
|
||||
}
|
||||
|
||||
await listTasks(tasksPath, statusFilter, reportPath, withSubtasks);
|
||||
await listTasks(tasksPath, statusFilter, withSubtasks);
|
||||
});
|
||||
|
||||
// expand command
|
||||
@@ -1399,15 +1379,9 @@ function registerCommands(programInstance) {
|
||||
`Show the next task to work on based on dependencies and status${chalk.reset('')}`
|
||||
)
|
||||
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
|
||||
.option(
|
||||
'-r, --report <report>',
|
||||
'Path to the complexity report file',
|
||||
'scripts/task-complexity-report.json'
|
||||
)
|
||||
.action(async (options) => {
|
||||
const tasksPath = options.file;
|
||||
const reportPath = options.report;
|
||||
await displayNextTask(tasksPath, reportPath);
|
||||
await displayNextTask(tasksPath);
|
||||
});
|
||||
|
||||
// show command
|
||||
@@ -1420,11 +1394,6 @@ function registerCommands(programInstance) {
|
||||
.option('-i, --id <id>', 'Task ID to show')
|
||||
.option('-s, --status <status>', 'Filter subtasks by status') // ADDED status option
|
||||
.option('-f, --file <file>', 'Path to the tasks file', 'tasks/tasks.json')
|
||||
.option(
|
||||
'-r, --report <report>',
|
||||
'Path to the complexity report file',
|
||||
'scripts/task-complexity-report.json'
|
||||
)
|
||||
.action(async (taskId, options) => {
|
||||
const idArg = taskId || options.id;
|
||||
const statusFilter = options.status; // ADDED: Capture status filter
|
||||
@@ -1435,9 +1404,8 @@ function registerCommands(programInstance) {
|
||||
}
|
||||
|
||||
const tasksPath = options.file;
|
||||
const reportPath = options.report;
|
||||
// PASS statusFilter to the display function
|
||||
await displayTaskById(tasksPath, idArg, reportPath, statusFilter);
|
||||
await displayTaskById(tasksPath, idArg, statusFilter);
|
||||
});
|
||||
|
||||
// add-dependency command
|
||||
|
||||
@@ -677,13 +677,6 @@ function getAllProviders() {
|
||||
return Object.keys(MODEL_MAP || {});
|
||||
}
|
||||
|
||||
function getBaseUrlForRole(role, explicitRoot = null) {
|
||||
const roleConfig = getModelConfigForRole(role, explicitRoot);
|
||||
return roleConfig && typeof roleConfig.baseUrl === 'string'
|
||||
? roleConfig.baseUrl
|
||||
: undefined;
|
||||
}
|
||||
|
||||
export {
|
||||
// Core config access
|
||||
getConfig,
|
||||
@@ -711,7 +704,6 @@ export {
|
||||
getFallbackModelId,
|
||||
getFallbackMaxTokens,
|
||||
getFallbackTemperature,
|
||||
getBaseUrlForRole,
|
||||
|
||||
// Global setting getters (No env var overrides)
|
||||
getLogLevel,
|
||||
|
||||
@@ -23,7 +23,7 @@ import updateSubtaskById from './task-manager/update-subtask-by-id.js';
|
||||
import removeTask from './task-manager/remove-task.js';
|
||||
import taskExists from './task-manager/task-exists.js';
|
||||
import isTaskDependentOn from './task-manager/is-task-dependent.js';
|
||||
import { readComplexityReport } from './utils.js';
|
||||
|
||||
// Export task manager functions
|
||||
export {
|
||||
parsePRD,
|
||||
@@ -45,6 +45,5 @@ export {
|
||||
removeTask,
|
||||
findTaskById,
|
||||
taskExists,
|
||||
isTaskDependentOn,
|
||||
readComplexityReport
|
||||
isTaskDependentOn
|
||||
};
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
import { log } from '../utils.js';
|
||||
import { addComplexityToTask } from '../utils.js';
|
||||
|
||||
/**
|
||||
* Return the next work item:
|
||||
* • Prefer an eligible SUBTASK that belongs to any parent task
|
||||
@@ -18,10 +15,9 @@ import { addComplexityToTask } from '../utils.js';
|
||||
* ─ parentId → number (present only when it's a subtask)
|
||||
*
|
||||
* @param {Object[]} tasks – full array of top-level tasks, each may contain .subtasks[]
|
||||
* @param {Object} [complexityReport=null] - Optional complexity report object
|
||||
* @returns {Object|null} – next work item or null if nothing is eligible
|
||||
*/
|
||||
function findNextTask(tasks, complexityReport = null) {
|
||||
function findNextTask(tasks) {
|
||||
// ---------- helpers ----------------------------------------------------
|
||||
const priorityValues = { high: 3, medium: 2, low: 1 };
|
||||
|
||||
@@ -95,14 +91,7 @@ function findNextTask(tasks, complexityReport = null) {
|
||||
if (aPar !== bPar) return aPar - bPar;
|
||||
return aSub - bSub;
|
||||
});
|
||||
const nextTask = candidateSubtasks[0];
|
||||
|
||||
// Add complexity to the task before returning
|
||||
if (nextTask && complexityReport) {
|
||||
addComplexityToTask(nextTask, complexityReport);
|
||||
}
|
||||
|
||||
return nextTask;
|
||||
return candidateSubtasks[0];
|
||||
}
|
||||
|
||||
// ---------- 2) fall back to top-level tasks (original logic) ------------
|
||||
@@ -127,11 +116,6 @@ function findNextTask(tasks, complexityReport = null) {
|
||||
return a.id - b.id;
|
||||
})[0];
|
||||
|
||||
// Add complexity to the task before returning
|
||||
if (nextTask && complexityReport) {
|
||||
addComplexityToTask(nextTask, complexityReport);
|
||||
}
|
||||
|
||||
return nextTask;
|
||||
}
|
||||
|
||||
|
||||
@@ -2,20 +2,13 @@ import chalk from 'chalk';
|
||||
import boxen from 'boxen';
|
||||
import Table from 'cli-table3';
|
||||
|
||||
import {
|
||||
log,
|
||||
readJSON,
|
||||
truncate,
|
||||
readComplexityReport,
|
||||
addComplexityToTask
|
||||
} from '../utils.js';
|
||||
import { log, readJSON, truncate } from '../utils.js';
|
||||
import findNextTask from './find-next-task.js';
|
||||
|
||||
import {
|
||||
displayBanner,
|
||||
getStatusWithColor,
|
||||
formatDependenciesWithStatus,
|
||||
getComplexityWithColor,
|
||||
createProgressBar
|
||||
} from '../ui.js';
|
||||
|
||||
@@ -23,7 +16,6 @@ import {
|
||||
* List all tasks
|
||||
* @param {string} tasksPath - Path to the tasks.json file
|
||||
* @param {string} statusFilter - Filter by status
|
||||
* @param {string} reportPath - Path to the complexity report
|
||||
* @param {boolean} withSubtasks - Whether to show subtasks
|
||||
* @param {string} outputFormat - Output format (text or json)
|
||||
* @returns {Object} - Task list result for json format
|
||||
@@ -31,7 +23,6 @@ import {
|
||||
function listTasks(
|
||||
tasksPath,
|
||||
statusFilter,
|
||||
reportPath = null,
|
||||
withSubtasks = false,
|
||||
outputFormat = 'text'
|
||||
) {
|
||||
@@ -46,13 +37,6 @@ function listTasks(
|
||||
throw new Error(`No valid tasks found in ${tasksPath}`);
|
||||
}
|
||||
|
||||
// Add complexity scores to tasks if report exists
|
||||
const complexityReport = readComplexityReport(reportPath);
|
||||
// Apply complexity scores to tasks
|
||||
if (complexityReport && complexityReport.complexityAnalysis) {
|
||||
data.tasks.forEach((task) => addComplexityToTask(task, complexityReport));
|
||||
}
|
||||
|
||||
// Filter tasks by status if specified
|
||||
const filteredTasks =
|
||||
statusFilter && statusFilter.toLowerCase() !== 'all' // <-- Added check for 'all'
|
||||
@@ -273,8 +257,8 @@ function listTasks(
|
||||
);
|
||||
const avgDependenciesPerTask = totalDependencies / data.tasks.length;
|
||||
|
||||
// Find next task to work on, passing the complexity report
|
||||
const nextItem = findNextTask(data.tasks, complexityReport);
|
||||
// Find next task to work on
|
||||
const nextItem = findNextTask(data.tasks);
|
||||
|
||||
// Get terminal width - more reliable method
|
||||
let terminalWidth;
|
||||
@@ -317,11 +301,8 @@ function listTasks(
|
||||
`${chalk.blue('•')} ${chalk.white('Avg dependencies per task:')} ${avgDependenciesPerTask.toFixed(1)}\n\n` +
|
||||
chalk.cyan.bold('Next Task to Work On:') +
|
||||
'\n' +
|
||||
`ID: ${chalk.cyan(nextItem ? nextItem.id : 'N/A')} - ${nextItem ? chalk.white.bold(truncate(nextItem.title, 40)) : chalk.yellow('No task available')}
|
||||
` +
|
||||
`Priority: ${nextItem ? chalk.white(nextItem.priority || 'medium') : ''} Dependencies: ${nextItem ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : ''}
|
||||
` +
|
||||
`Complexity: ${nextItem && nextItem.complexityScore ? getComplexityWithColor(nextItem.complexityScore) : chalk.gray('N/A')}`;
|
||||
`ID: ${chalk.cyan(nextItem ? nextItem.id : 'N/A')} - ${nextItem ? chalk.white.bold(truncate(nextItem.title, 40)) : chalk.yellow('No task available')}\n` +
|
||||
`Priority: ${nextItem ? chalk.white(nextItem.priority || 'medium') : ''} Dependencies: ${nextItem ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true) : ''}`;
|
||||
|
||||
// Calculate width for side-by-side display
|
||||
// Box borders, padding take approximately 4 chars on each side
|
||||
@@ -431,16 +412,9 @@ function listTasks(
|
||||
// Make dependencies column smaller as requested (-20%)
|
||||
const depsWidthPct = 20;
|
||||
|
||||
const complexityWidthPct = 10;
|
||||
|
||||
// Calculate title/description width as remaining space (+20% from dependencies reduction)
|
||||
const titleWidthPct =
|
||||
100 -
|
||||
idWidthPct -
|
||||
statusWidthPct -
|
||||
priorityWidthPct -
|
||||
depsWidthPct -
|
||||
complexityWidthPct;
|
||||
100 - idWidthPct - statusWidthPct - priorityWidthPct - depsWidthPct;
|
||||
|
||||
// Allow 10 characters for borders and padding
|
||||
const availableWidth = terminalWidth - 10;
|
||||
@@ -450,9 +424,6 @@ function listTasks(
|
||||
const statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));
|
||||
const priorityWidth = Math.floor(availableWidth * (priorityWidthPct / 100));
|
||||
const depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));
|
||||
const complexityWidth = Math.floor(
|
||||
availableWidth * (complexityWidthPct / 100)
|
||||
);
|
||||
const titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));
|
||||
|
||||
// Create a table with correct borders and spacing
|
||||
@@ -462,17 +433,9 @@ function listTasks(
|
||||
chalk.cyan.bold('Title'),
|
||||
chalk.cyan.bold('Status'),
|
||||
chalk.cyan.bold('Priority'),
|
||||
chalk.cyan.bold('Dependencies'),
|
||||
chalk.cyan.bold('Complexity')
|
||||
],
|
||||
colWidths: [
|
||||
idWidth,
|
||||
titleWidth,
|
||||
statusWidth,
|
||||
priorityWidth,
|
||||
depsWidth,
|
||||
complexityWidth // Added complexity column width
|
||||
chalk.cyan.bold('Dependencies')
|
||||
],
|
||||
colWidths: [idWidth, titleWidth, statusWidth, priorityWidth, depsWidth],
|
||||
style: {
|
||||
head: [], // No special styling for header
|
||||
border: [], // No special styling for border
|
||||
@@ -491,8 +454,7 @@ function listTasks(
|
||||
depText = formatDependenciesWithStatus(
|
||||
task.dependencies,
|
||||
data.tasks,
|
||||
true,
|
||||
complexityReport
|
||||
true
|
||||
);
|
||||
} else {
|
||||
depText = chalk.gray('None');
|
||||
@@ -518,10 +480,7 @@ function listTasks(
|
||||
truncate(cleanTitle, titleWidth - 3),
|
||||
status,
|
||||
priorityColor(truncate(task.priority || 'medium', priorityWidth - 2)),
|
||||
depText,
|
||||
task.complexityScore
|
||||
? getComplexityWithColor(task.complexityScore)
|
||||
: chalk.gray('N/A')
|
||||
depText // No truncation for dependencies
|
||||
]);
|
||||
|
||||
// Add subtasks if requested
|
||||
@@ -557,8 +516,6 @@ function listTasks(
|
||||
// Default to regular task dependency
|
||||
const depTask = data.tasks.find((t) => t.id === depId);
|
||||
if (depTask) {
|
||||
// Add complexity to depTask before checking status
|
||||
addComplexityToTask(depTask, complexityReport);
|
||||
const isDone =
|
||||
depTask.status === 'done' || depTask.status === 'completed';
|
||||
const isInProgress = depTask.status === 'in-progress';
|
||||
@@ -584,10 +541,7 @@ function listTasks(
|
||||
chalk.dim(`└─ ${truncate(subtask.title, titleWidth - 5)}`),
|
||||
getStatusWithColor(subtask.status, true),
|
||||
chalk.dim('-'),
|
||||
subtaskDepText,
|
||||
subtask.complexityScore
|
||||
? chalk.gray(`${subtask.complexityScore}`)
|
||||
: chalk.gray('N/A')
|
||||
subtaskDepText // No truncation for dependencies
|
||||
]);
|
||||
});
|
||||
}
|
||||
@@ -643,8 +597,6 @@ function listTasks(
|
||||
subtasksSection = `\n\n${chalk.white.bold('Subtasks:')}\n`;
|
||||
subtasksSection += parentTaskForSubtasks.subtasks
|
||||
.map((subtask) => {
|
||||
// Add complexity to subtask before display
|
||||
addComplexityToTask(subtask, complexityReport);
|
||||
// Using a more simplified format for subtask status display
|
||||
const status = subtask.status || 'pending';
|
||||
const statusColors = {
|
||||
@@ -673,8 +625,8 @@ function listTasks(
|
||||
'\n\n' +
|
||||
// Use nextItem.priority, nextItem.status, nextItem.dependencies
|
||||
`${chalk.white('Priority:')} ${priorityColors[nextItem.priority || 'medium'](nextItem.priority || 'medium')} ${chalk.white('Status:')} ${getStatusWithColor(nextItem.status, true)}\n` +
|
||||
`${chalk.white('Dependencies:')} ${nextItem.dependencies && nextItem.dependencies.length > 0 ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : chalk.gray('None')}\n\n` +
|
||||
// Use nextTask.description (Note: findNextTask doesn't return description, need to fetch original task/subtask for this)
|
||||
`${chalk.white('Dependencies:')} ${nextItem.dependencies && nextItem.dependencies.length > 0 ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true) : chalk.gray('None')}\n\n` +
|
||||
// Use nextItem.description (Note: findNextTask doesn't return description, need to fetch original task/subtask for this)
|
||||
// *** Fetching original item for description and details ***
|
||||
`${chalk.white('Description:')} ${getWorkItemDescription(nextItem, data.tasks)}` +
|
||||
subtasksSection + // <-- Subtasks are handled above now
|
||||
|
||||
@@ -8,10 +8,6 @@ import { validateTaskDependencies } from '../dependency-manager.js';
|
||||
import { getDebugFlag } from '../config-manager.js';
|
||||
import updateSingleTaskStatus from './update-single-task-status.js';
|
||||
import generateTaskFiles from './generate-task-files.js';
|
||||
import {
|
||||
isValidTaskStatus,
|
||||
TASK_STATUS_OPTIONS
|
||||
} from '../../../src/constants/task-status.js';
|
||||
|
||||
/**
|
||||
* Set the status of a task
|
||||
@@ -23,11 +19,6 @@ import {
|
||||
*/
|
||||
async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {
|
||||
try {
|
||||
if (!isValidTaskStatus(newStatus)) {
|
||||
throw new Error(
|
||||
`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
|
||||
);
|
||||
}
|
||||
// Determine if we're in MCP mode by checking for mcpLog
|
||||
const isMcpMode = !!options?.mcpLog;
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import chalk from 'chalk';
|
||||
|
||||
import { log } from '../utils.js';
|
||||
import { isValidTaskStatus } from '../../../src/constants/task-status.js';
|
||||
|
||||
/**
|
||||
* Update the status of a single task
|
||||
@@ -18,12 +17,6 @@ async function updateSingleTaskStatus(
|
||||
data,
|
||||
showUi = true
|
||||
) {
|
||||
if (!isValidTaskStatus(newStatus)) {
|
||||
throw new Error(
|
||||
`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
|
||||
);
|
||||
}
|
||||
|
||||
// Check if it's a subtask (e.g., "1.2")
|
||||
if (taskIdInput.includes('.')) {
|
||||
const [parentId, subtaskId] = taskIdInput
|
||||
|
||||
@@ -17,13 +17,8 @@ import {
|
||||
isSilentMode
|
||||
} from './utils.js';
|
||||
import fs from 'fs';
|
||||
import {
|
||||
findNextTask,
|
||||
analyzeTaskComplexity,
|
||||
readComplexityReport
|
||||
} from './task-manager.js';
|
||||
import { findNextTask, analyzeTaskComplexity } from './task-manager.js';
|
||||
import { getProjectName, getDefaultSubtasks } from './config-manager.js';
|
||||
import { TASK_STATUS_OPTIONS } from '../../src/constants/task-status.js';
|
||||
import { getTaskMasterVersion } from '../../src/utils/getVersion.js';
|
||||
|
||||
// Create a color gradient for the banner
|
||||
@@ -268,14 +263,12 @@ function getStatusWithColor(status, forTable = false) {
|
||||
* @param {Array} dependencies - Array of dependency IDs
|
||||
* @param {Array} allTasks - Array of all tasks
|
||||
* @param {boolean} forConsole - Whether the output is for console display
|
||||
* @param {Object|null} complexityReport - Optional pre-loaded complexity report
|
||||
* @returns {string} Formatted dependencies string
|
||||
*/
|
||||
function formatDependenciesWithStatus(
|
||||
dependencies,
|
||||
allTasks,
|
||||
forConsole = false,
|
||||
complexityReport = null // Add complexityReport parameter
|
||||
forConsole = false
|
||||
) {
|
||||
if (
|
||||
!dependencies ||
|
||||
@@ -339,11 +332,7 @@ function formatDependenciesWithStatus(
|
||||
typeof depId === 'string' ? parseInt(depId, 10) : depId;
|
||||
|
||||
// Look up the task using the numeric ID
|
||||
const depTaskResult = findTaskById(
|
||||
allTasks,
|
||||
numericDepId,
|
||||
complexityReport
|
||||
);
|
||||
const depTaskResult = findTaskById(allTasks, numericDepId);
|
||||
const depTask = depTaskResult.task; // Access the task object from the result
|
||||
|
||||
if (!depTask) {
|
||||
@@ -459,7 +448,7 @@ function displayHelp() {
|
||||
{
|
||||
name: 'set-status',
|
||||
args: '--id=<id> --status=<status>',
|
||||
desc: `Update task status (${TASK_STATUS_OPTIONS.join(', ')})`
|
||||
desc: 'Update task status (done, pending, etc.)'
|
||||
},
|
||||
{
|
||||
name: 'update',
|
||||
@@ -762,7 +751,7 @@ function truncateString(str, maxLength) {
|
||||
* Display the next task to work on
|
||||
* @param {string} tasksPath - Path to the tasks.json file
|
||||
*/
|
||||
async function displayNextTask(tasksPath, complexityReportPath = null) {
|
||||
async function displayNextTask(tasksPath) {
|
||||
displayBanner();
|
||||
|
||||
// Read the tasks file
|
||||
@@ -772,11 +761,8 @@ async function displayNextTask(tasksPath, complexityReportPath = null) {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Read complexity report once
|
||||
const complexityReport = readComplexityReport(complexityReportPath);
|
||||
|
||||
// Find the next task
|
||||
const nextTask = findNextTask(data.tasks, complexityReport);
|
||||
const nextTask = findNextTask(data.tasks);
|
||||
|
||||
if (!nextTask) {
|
||||
console.log(
|
||||
@@ -837,18 +823,7 @@ async function displayNextTask(tasksPath, complexityReportPath = null) {
|
||||
],
|
||||
[
|
||||
chalk.cyan.bold('Dependencies:'),
|
||||
formatDependenciesWithStatus(
|
||||
nextTask.dependencies,
|
||||
data.tasks,
|
||||
true,
|
||||
complexityReport
|
||||
)
|
||||
],
|
||||
[
|
||||
chalk.cyan.bold('Complexity:'),
|
||||
nextTask.complexityScore
|
||||
? getComplexityWithColor(nextTask.complexityScore)
|
||||
: chalk.gray('N/A')
|
||||
formatDependenciesWithStatus(nextTask.dependencies, data.tasks, true)
|
||||
],
|
||||
[chalk.cyan.bold('Description:'), nextTask.description]
|
||||
);
|
||||
@@ -1016,12 +991,7 @@ async function displayNextTask(tasksPath, complexityReportPath = null) {
|
||||
* @param {string|number} taskId - The ID of the task to display
|
||||
* @param {string} [statusFilter] - Optional status to filter subtasks by
|
||||
*/
|
||||
async function displayTaskById(
|
||||
tasksPath,
|
||||
taskId,
|
||||
complexityReportPath = null,
|
||||
statusFilter = null
|
||||
) {
|
||||
async function displayTaskById(tasksPath, taskId, statusFilter = null) {
|
||||
displayBanner();
|
||||
|
||||
// Read the tasks file
|
||||
@@ -1031,15 +1001,11 @@ async function displayTaskById(
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Read complexity report once
|
||||
const complexityReport = readComplexityReport(complexityReportPath);
|
||||
|
||||
// Find the task by ID, applying the status filter if provided
|
||||
// Returns { task, originalSubtaskCount, originalSubtasks }
|
||||
const { task, originalSubtaskCount, originalSubtasks } = findTaskById(
|
||||
data.tasks,
|
||||
taskId,
|
||||
complexityReport,
|
||||
statusFilter
|
||||
);
|
||||
|
||||
@@ -1094,12 +1060,6 @@ async function displayTaskById(
|
||||
chalk.cyan.bold('Status:'),
|
||||
getStatusWithColor(task.status || 'pending', true)
|
||||
],
|
||||
[
|
||||
chalk.cyan.bold('Complexity:'),
|
||||
task.complexityScore
|
||||
? getComplexityWithColor(task.complexityScore)
|
||||
: chalk.gray('N/A')
|
||||
],
|
||||
[
|
||||
chalk.cyan.bold('Description:'),
|
||||
task.description || 'No description provided.'
|
||||
@@ -1178,18 +1138,7 @@ async function displayTaskById(
|
||||
[chalk.cyan.bold('Priority:'), priorityColor(task.priority || 'medium')],
|
||||
[
|
||||
chalk.cyan.bold('Dependencies:'),
|
||||
formatDependenciesWithStatus(
|
||||
task.dependencies,
|
||||
data.tasks,
|
||||
true,
|
||||
complexityReport
|
||||
)
|
||||
],
|
||||
[
|
||||
chalk.cyan.bold('Complexity:'),
|
||||
task.complexityScore
|
||||
? getComplexityWithColor(task.complexityScore)
|
||||
: chalk.gray('N/A')
|
||||
formatDependenciesWithStatus(task.dependencies, data.tasks, true)
|
||||
],
|
||||
[chalk.cyan.bold('Description:'), task.description]
|
||||
);
|
||||
|
||||
@@ -275,22 +275,6 @@ function findTaskInComplexityReport(report, taskId) {
|
||||
return report.complexityAnalysis.find((task) => task.taskId === taskId);
|
||||
}
|
||||
|
||||
function addComplexityToTask(task, complexityReport) {
|
||||
let taskId;
|
||||
if (task.isSubtask) {
|
||||
taskId = task.parentTask.id;
|
||||
} else if (task.parentId) {
|
||||
taskId = task.parentId;
|
||||
} else {
|
||||
taskId = task.id;
|
||||
}
|
||||
|
||||
const taskAnalysis = findTaskInComplexityReport(complexityReport, taskId);
|
||||
if (taskAnalysis) {
|
||||
task.complexityScore = taskAnalysis.complexityScore;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if a task exists in the tasks array
|
||||
* @param {Array} tasks - The tasks array
|
||||
@@ -341,17 +325,10 @@ function formatTaskId(id) {
|
||||
* Finds a task by ID in the tasks array. Optionally filters subtasks by status.
|
||||
* @param {Array} tasks - The tasks array
|
||||
* @param {string|number} taskId - The task ID to find
|
||||
* @param {Object|null} complexityReport - Optional pre-loaded complexity report
|
||||
* @returns {Object|null} The task object or null if not found
|
||||
* @param {string} [statusFilter] - Optional status to filter subtasks by
|
||||
* @returns {{task: Object|null, originalSubtaskCount: number|null}} The task object (potentially with filtered subtasks) and the original subtask count if filtered, or nulls if not found.
|
||||
*/
|
||||
function findTaskById(
|
||||
tasks,
|
||||
taskId,
|
||||
complexityReport = null,
|
||||
statusFilter = null
|
||||
) {
|
||||
function findTaskById(tasks, taskId, statusFilter = null) {
|
||||
if (!taskId || !tasks || !Array.isArray(tasks)) {
|
||||
return { task: null, originalSubtaskCount: null };
|
||||
}
|
||||
@@ -379,17 +356,10 @@ function findTaskById(
|
||||
subtask.isSubtask = true;
|
||||
}
|
||||
|
||||
// If we found a task, check for complexity data
|
||||
if (subtask && complexityReport) {
|
||||
addComplexityToTask(subtask, complexityReport);
|
||||
}
|
||||
|
||||
// Return the found subtask (or null) and null for originalSubtaskCount
|
||||
return { task: subtask || null, originalSubtaskCount: null };
|
||||
}
|
||||
|
||||
let taskResult = null;
|
||||
let originalSubtaskCount = null;
|
||||
|
||||
// Find the main task
|
||||
const id = parseInt(taskId, 10);
|
||||
const task = tasks.find((t) => t.id === id) || null;
|
||||
@@ -399,8 +369,6 @@ function findTaskById(
|
||||
return { task: null, originalSubtaskCount: null };
|
||||
}
|
||||
|
||||
taskResult = task;
|
||||
|
||||
// If task found and statusFilter provided, filter its subtasks
|
||||
if (statusFilter && task.subtasks && Array.isArray(task.subtasks)) {
|
||||
const originalSubtaskCount = task.subtasks.length;
|
||||
@@ -411,18 +379,12 @@ function findTaskById(
|
||||
subtask.status &&
|
||||
subtask.status.toLowerCase() === statusFilter.toLowerCase()
|
||||
);
|
||||
|
||||
taskResult = filteredTask;
|
||||
originalSubtaskCount = originalSubtaskCount;
|
||||
// Return the filtered task and the original count
|
||||
return { task: filteredTask, originalSubtaskCount: originalSubtaskCount };
|
||||
}
|
||||
|
||||
// If task found and complexityReport provided, add complexity data
|
||||
if (taskResult && complexityReport) {
|
||||
addComplexityToTask(taskResult, complexityReport);
|
||||
}
|
||||
|
||||
// Return the found task and original subtask count
|
||||
return { task: taskResult, originalSubtaskCount };
|
||||
// Return original task and null count if no filter or no subtasks
|
||||
return { task: task, originalSubtaskCount: null };
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -562,11 +524,10 @@ export {
|
||||
findCycles,
|
||||
toKebabCase,
|
||||
detectCamelCaseFlags,
|
||||
disableSilentMode,
|
||||
enableSilentMode,
|
||||
getTaskManager,
|
||||
disableSilentMode,
|
||||
isSilentMode,
|
||||
addComplexityToTask,
|
||||
resolveEnvVariable,
|
||||
getTaskManager,
|
||||
findProjectRoot
|
||||
};
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
* using the Vercel AI SDK.
|
||||
*/
|
||||
import { createAnthropic } from '@ai-sdk/anthropic';
|
||||
import { generateText, streamText, generateObject } from 'ai';
|
||||
import { generateText, streamText, generateObject, streamObject } from 'ai';
|
||||
import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
|
||||
|
||||
// TODO: Implement standardized functions for generateText, streamText, generateObject
|
||||
@@ -17,7 +17,7 @@ import { log } from '../../scripts/modules/utils.js'; // Assuming utils is acces
|
||||
// Remove the global variable and caching logic
|
||||
// let anthropicClient;
|
||||
|
||||
function getClient(apiKey, baseUrl) {
|
||||
function getClient(apiKey) {
|
||||
if (!apiKey) {
|
||||
// In a real scenario, this would use the config resolver.
|
||||
// Throwing error here if key isn't passed for simplicity.
|
||||
@@ -30,12 +30,14 @@ function getClient(apiKey, baseUrl) {
|
||||
// Create and return a new instance directly with standard version header
|
||||
return createAnthropic({
|
||||
apiKey: apiKey,
|
||||
...(baseUrl && { baseURL: baseUrl }),
|
||||
baseURL: 'https://api.anthropic.com/v1',
|
||||
// Use standard version header instead of beta
|
||||
headers: {
|
||||
'anthropic-beta': 'output-128k-2025-02-19'
|
||||
}
|
||||
});
|
||||
// }
|
||||
// return anthropicClient;
|
||||
}
|
||||
|
||||
// --- Standardized Service Function Implementations ---
|
||||
@@ -49,7 +51,6 @@ function getClient(apiKey, baseUrl) {
|
||||
* @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
|
||||
* @param {number} [params.maxTokens] - Maximum tokens for the response.
|
||||
* @param {number} [params.temperature] - Temperature for generation.
|
||||
* @param {string} [params.baseUrl] - The base URL for the Anthropic API.
|
||||
* @returns {Promise<string>} The generated text content.
|
||||
* @throws {Error} If the API call fails.
|
||||
*/
|
||||
@@ -58,12 +59,11 @@ export async function generateAnthropicText({
|
||||
modelId,
|
||||
messages,
|
||||
maxTokens,
|
||||
temperature,
|
||||
baseUrl
|
||||
temperature
|
||||
}) {
|
||||
log('debug', `Generating Anthropic text with model: ${modelId}`);
|
||||
try {
|
||||
const client = getClient(apiKey, baseUrl);
|
||||
const client = getClient(apiKey);
|
||||
const result = await generateText({
|
||||
model: client(modelId),
|
||||
messages: messages,
|
||||
@@ -93,7 +93,6 @@ export async function generateAnthropicText({
|
||||
 * @param {Array<object>} params.messages - The messages array.
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
 * @param {string} [params.baseUrl] - The base URL for the Anthropic API.
 * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
 * @throws {Error} If the API call fails to initiate the stream.
 */
@@ -102,20 +101,20 @@ export async function streamAnthropicText({
	modelId,
	messages,
	maxTokens,
	temperature,
	baseUrl
	temperature
}) {
	log('debug', `Streaming Anthropic text with model: ${modelId}`);
	try {
		const client = getClient(apiKey, baseUrl);
		const client = getClient(apiKey);

		// --- DEBUG LOGGING --- >>
		log(
			'debug',
			'[streamAnthropicText] Parameters received by streamText:',
			JSON.stringify(
				{
					modelId: modelId,
					messages: messages,
					modelId: modelId, // Log modelId being used
					messages: messages, // Log the messages array
					maxTokens: maxTokens,
					temperature: temperature
				},
@@ -123,19 +122,25 @@ export async function streamAnthropicText({
				2
			)
		);
		// --- << DEBUG LOGGING ---

		const stream = await streamText({
			model: client(modelId),
			messages: messages,
			maxTokens: maxTokens,
			temperature: temperature
			// Beta header moved to client initialization
			// TODO: Add other relevant parameters
		});

		// *** RETURN THE FULL STREAM OBJECT, NOT JUST stream.textStream ***
		return stream;
	} catch (error) {
		log('error', `Anthropic streamText failed: ${error.message}`, error.stack);
		log(
			'error',
			`Anthropic streamText failed: ${error.message}`,
			error.stack // Log stack trace for more details
		);
		throw error;
	}
}
@@ -155,7 +160,6 @@ export async function streamAnthropicText({
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
 * @param {number} [params.maxRetries] - Max retries for validation/generation.
 * @param {string} [params.baseUrl] - The base URL for the Anthropic API.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If generation or validation fails.
 */
@@ -167,22 +171,24 @@ export async function generateAnthropicObject({
	objectName = 'generated_object',
	maxTokens,
	temperature,
	maxRetries = 3,
	baseUrl
	maxRetries = 3
}) {
	log(
		'debug',
		`Generating Anthropic object ('${objectName}') with model: ${modelId}`
	);
	try {
		const client = getClient(apiKey, baseUrl);
		const client = getClient(apiKey);

		// Log basic debug info
		log(
			'debug',
			`Using maxTokens: ${maxTokens}, temperature: ${temperature}, model: ${modelId}`
		);

		const result = await generateObject({
			model: client(modelId),
			mode: 'tool',
			mode: 'tool', // Anthropic generally uses 'tool' mode for structured output
			schema: schema,
			messages: messages,
			tool: {
@@ -193,12 +199,14 @@ export async function generateAnthropicObject({
			temperature: temperature,
			maxRetries: maxRetries
		});

		log(
			'debug',
			`Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
		);
		return result.object;
	} catch (error) {
		// Simple error logging
		log(
			'error',
			`Anthropic generateObject ('${objectName}') failed: ${error.message}`

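For context, the `getClient(apiKey, baseUrl)` calls in these hunks assume an Anthropic client factory following the same pattern shown for the other providers further down in this comparison. The Anthropic factory itself is not part of these hunks, so the following is only a minimal sketch of that pattern, assuming `createAnthropic` from `@ai-sdk/anthropic`:

import { createAnthropic } from '@ai-sdk/anthropic';

// Sketch only: spread the optional base URL into the provider options when present.
function getClient(apiKey, baseUrl) {
	if (!apiKey) {
		throw new Error('Anthropic API key is required.');
	}
	return createAnthropic({
		apiKey: apiKey,
		...(baseUrl && { baseURL: baseUrl })
	});
}
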
@@ -12,16 +12,6 @@ import { log } from '../../scripts/modules/utils.js'; // Import logging utility
const DEFAULT_MODEL = 'gemini-2.0-pro'; // Or a suitable default
const DEFAULT_TEMPERATURE = 0.2; // Or a suitable default

function getClient(apiKey, baseUrl) {
	if (!apiKey) {
		throw new Error('Google API key is required.');
	}
	return createGoogleGenerativeAI({
		apiKey: apiKey,
		...(baseUrl && { baseURL: baseUrl })
	});
}

/**
 * Generates text using a Google AI model.
 *
@@ -39,8 +29,7 @@ async function generateGoogleText({
	modelId = DEFAULT_MODEL,
	temperature = DEFAULT_TEMPERATURE,
	messages,
	maxTokens,
	baseUrl
	maxTokens // Note: Vercel SDK might handle this differently, needs verification
}) {
	if (!apiKey) {
		throw new Error('Google API key is required.');
@@ -48,21 +37,28 @@ async function generateGoogleText({
	log('info', `Generating text with Google model: ${modelId}`);

	try {
		const googleProvider = getClient(apiKey, baseUrl);
		const model = googleProvider(modelId);
		// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
		const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
		// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
		const model = googleProvider(modelId); // Correct model retrieval

		// Construct payload suitable for Vercel SDK's generateText
		// Note: The exact structure might depend on how messages are passed
		const result = await generateText({
			model,
			messages,
			model, // Pass the model instance
			messages, // Pass the messages array directly
			temperature,
			maxOutputTokens: maxTokens
			maxOutputTokens: maxTokens // Map to correct Vercel SDK param if available
		});
		return result.text;

		// Assuming result structure provides text directly or within a property
		return result.text; // Adjust based on actual SDK response
	} catch (error) {
		log(
			'error',
			`Error generating text with Google (${modelId}): ${error.message}`
		);
		throw error;
		throw error; // Re-throw for unified service handler
	}
}

@@ -83,8 +79,7 @@ async function streamGoogleText({
	modelId = DEFAULT_MODEL,
	temperature = DEFAULT_TEMPERATURE,
	messages,
	maxTokens,
	baseUrl
	maxTokens
}) {
	if (!apiKey) {
		throw new Error('Google API key is required.');
@@ -92,15 +87,19 @@ async function streamGoogleText({
	log('info', `Streaming text with Google model: ${modelId}`);

	try {
		const googleProvider = getClient(apiKey, baseUrl);
		const model = googleProvider(modelId);
		// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
		const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
		// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
		const model = googleProvider(modelId); // Correct model retrieval

		const stream = await streamText({
			model,
			model, // Pass the model instance
			messages,
			temperature,
			maxOutputTokens: maxTokens
		});
		return stream;

		return stream; // Return the stream directly
	} catch (error) {
		log(
			'error',
@@ -131,8 +130,7 @@ async function generateGoogleObject({
	messages,
	schema,
	objectName, // Note: Vercel SDK might use this differently or not at all
	maxTokens,
	baseUrl
	maxTokens
}) {
	if (!apiKey) {
		throw new Error('Google API key is required.');
@@ -140,16 +138,23 @@ async function generateGoogleObject({
	log('info', `Generating object with Google model: ${modelId}`);

	try {
		const googleProvider = getClient(apiKey, baseUrl);
		const model = googleProvider(modelId);
		// const google = new GoogleGenerativeAI({ apiKey }); // Incorrect instantiation
		const googleProvider = createGoogleGenerativeAI({ apiKey }); // Correct instantiation
		// const model = google.getGenerativeModel({ model: modelId }); // Incorrect model retrieval
		const model = googleProvider(modelId); // Correct model retrieval

		const { object } = await generateObject({
			model,
			model, // Pass the model instance
			schema,
			messages,
			temperature,
			maxOutputTokens: maxTokens
			// Note: 'objectName' or 'mode' might not be directly applicable here
			// depending on how `@ai-sdk/google` handles `generateObject`.
			// Check SDK docs if specific tool calling/JSON mode needs explicit setup.
		});
		return object;

		return object; // Return the parsed object
	} catch (error) {
		log(
			'error',

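A hypothetical call to `generateGoogleText` as declared above; the message content is an illustrative value and not taken from the project, while the model ID reuses the file's `DEFAULT_MODEL`:

const text = await generateGoogleText({
	apiKey: process.env.GOOGLE_API_KEY,
	modelId: 'gemini-2.0-pro',
	messages: [{ role: 'user', content: 'Summarize the PRD in one sentence.' }],
	maxTokens: 256,
	temperature: 0.2
});
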
@@ -1,26 +1,16 @@
import { createOpenAI } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
import { generateObject } from 'ai'; // Import necessary functions from 'ai'
import { createOpenAI, openai } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
import { generateText, streamText, generateObject } from 'ai'; // Import necessary functions from 'ai'
import { log } from '../../scripts/modules/utils.js';

function getClient(apiKey, baseUrl) {
	if (!apiKey) {
		throw new Error('OpenAI API key is required.');
	}
	return createOpenAI({
		apiKey: apiKey,
		...(baseUrl && { baseURL: baseUrl })
	});
}

/**
 * Generates text using OpenAI models via Vercel AI SDK.
 *
 * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
 * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
 * @returns {Promise<string>} The generated text content.
 * @throws {Error} If API call fails.
 */
export async function generateOpenAIText(params) {
	const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
	const { apiKey, modelId, messages, maxTokens, temperature } = params;
	log('debug', `generateOpenAIText called with model: ${modelId}`);

	if (!apiKey) {
@@ -33,15 +23,18 @@ export async function generateOpenAIText(params) {
		throw new Error('Invalid or empty messages array provided for OpenAI.');
	}

	const openaiClient = getClient(apiKey, baseUrl);
	const openaiClient = createOpenAI({ apiKey });

	try {
		const result = await openaiClient.chat(messages, {
			// Updated: Use openaiClient.chat directly
			model: modelId,
			max_tokens: maxTokens,
			temperature
		});

		// Adjust based on actual Vercel SDK response structure for openaiClient.chat
		// This might need refinement based on testing the SDK's output.
		const textContent = result?.choices?.[0]?.message?.content?.trim();

		if (!textContent) {
@@ -72,12 +65,12 @@ export async function generateOpenAIText(params) {
/**
 * Streams text using OpenAI models via Vercel AI SDK.
 *
 * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature, baseUrl.
 * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
 * @returns {Promise<ReadableStream>} A readable stream of text deltas.
 * @throws {Error} If API call fails.
 */
export async function streamOpenAIText(params) {
	const { apiKey, modelId, messages, maxTokens, temperature, baseUrl } = params;
	const { apiKey, modelId, messages, maxTokens, temperature } = params;
	log('debug', `streamOpenAIText called with model: ${modelId}`);

	if (!apiKey) {
@@ -92,10 +85,12 @@ export async function streamOpenAIText(params) {
		);
	}

	const openaiClient = getClient(apiKey, baseUrl);
	const openaiClient = createOpenAI({ apiKey });

	try {
		// Use the streamText function from Vercel AI SDK core
		const stream = await openaiClient.chat.stream(messages, {
			// Updated: Use openaiClient.chat.stream
			model: modelId,
			max_tokens: maxTokens,
			temperature
@@ -105,6 +100,7 @@ export async function streamOpenAIText(params) {
			'debug',
			`OpenAI streamText initiated successfully for model: ${modelId}`
		);
		// The Vercel SDK's streamText should directly return the stream object
		return stream;
	} catch (error) {
		log(
@@ -121,7 +117,7 @@ export async function streamOpenAIText(params) {
/**
 * Generates structured objects using OpenAI models via Vercel AI SDK.
 *
 * @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature, baseUrl.
 * @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If API call fails or object generation fails.
 */
@@ -133,8 +129,7 @@ export async function generateOpenAIObject(params) {
		schema,
		objectName,
		maxTokens,
		temperature,
		baseUrl
		temperature
	} = params;
	log(
		'debug',
@@ -150,9 +145,10 @@ export async function generateOpenAIObject(params) {
	if (!objectName)
		throw new Error('Object name is required for OpenAI object generation.');

	const openaiClient = getClient(apiKey, baseUrl);
	const openaiClient = createOpenAI({ apiKey });

	try {
		// Use the imported generateObject function from 'ai' package
		const result = await generateObject({
			model: openaiClient(modelId),
			schema: schema,

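The `schema` passed to `generateObject` above is typically a Zod schema in the Vercel AI SDK. A hypothetical call sketch; the schema shape, model ID and prompt are placeholders rather than the project's actual values:

import { z } from 'zod';

// Illustrative schema only; the project's real schemas are not shown in this diff.
const subtaskSchema = z.object({
	title: z.string(),
	description: z.string()
});

const result = await generateOpenAIObject({
	apiKey: process.env.OPENAI_API_KEY,
	modelId: 'gpt-4o',
	messages: [{ role: 'user', content: 'Draft one subtask for setting up CI.' }],
	schema: subtaskSchema,
	objectName: 'subtask',
	maxTokens: 512,
	temperature: 0.2
});
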
@@ -2,14 +2,6 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
import { generateText, streamText, generateObject } from 'ai';
import { log } from '../../scripts/modules/utils.js'; // Assuming utils.js is in scripts/modules

function getClient(apiKey, baseUrl) {
	if (!apiKey) throw new Error('OpenRouter API key is required.');
	return createOpenRouter({
		apiKey,
		...(baseUrl && { baseURL: baseUrl })
	});
}

/**
 * Generates text using an OpenRouter chat model.
 *
@@ -19,7 +11,6 @@ function getClient(apiKey, baseUrl) {
 * @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
 * @param {number} [params.maxTokens] - Maximum tokens to generate.
 * @param {number} [params.temperature] - Sampling temperature.
 * @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
 * @returns {Promise<string>} The generated text content.
 * @throws {Error} If the API call fails.
 */
@@ -29,7 +20,6 @@ async function generateOpenRouterText({
	messages,
	maxTokens,
	temperature,
	baseUrl,
	...rest // Capture any other Vercel AI SDK compatible parameters
}) {
	if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -38,7 +28,7 @@ async function generateOpenRouterText({
		throw new Error('Messages array cannot be empty.');

	try {
		const openrouter = getClient(apiKey, baseUrl);
		const openrouter = createOpenRouter({ apiKey });
		const model = openrouter.chat(modelId); // Assuming chat model

		const { text } = await generateText({
@@ -68,7 +58,6 @@ async function generateOpenRouterText({
 * @param {Array<object>} params.messages - Array of message objects (system, user, assistant).
 * @param {number} [params.maxTokens] - Maximum tokens to generate.
 * @param {number} [params.temperature] - Sampling temperature.
 * @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
 * @returns {Promise<ReadableStream<string>>} A readable stream of text deltas.
 * @throws {Error} If the API call fails.
 */
@@ -78,7 +67,6 @@ async function streamOpenRouterText({
	messages,
	maxTokens,
	temperature,
	baseUrl,
	...rest
}) {
	if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -87,7 +75,7 @@ async function streamOpenRouterText({
		throw new Error('Messages array cannot be empty.');

	try {
		const openrouter = getClient(apiKey, baseUrl);
		const openrouter = createOpenRouter({ apiKey });
		const model = openrouter.chat(modelId);

		// Directly return the stream from the Vercel AI SDK function
@@ -120,7 +108,6 @@ async function streamOpenRouterText({
 * @param {number} [params.maxRetries=3] - Max retries for object generation.
 * @param {number} [params.maxTokens] - Maximum tokens.
 * @param {number} [params.temperature] - Temperature.
 * @param {string} [params.baseUrl] - Base URL for the OpenRouter API.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If the API call fails or validation fails.
 */
@@ -133,7 +120,6 @@ async function generateOpenRouterObject({
	maxRetries = 3,
	maxTokens,
	temperature,
	baseUrl,
	...rest
}) {
	if (!apiKey) throw new Error('OpenRouter API key is required.');
@@ -143,7 +129,7 @@ async function generateOpenRouterObject({
		throw new Error('Messages array cannot be empty.');

	try {
		const openrouter = getClient(apiKey, baseUrl);
		const openrouter = createOpenRouter({ apiKey });
		const model = openrouter.chat(modelId);

		const { object } = await generateObject({

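A hypothetical call showing how an extra Vercel AI SDK option could travel through the `...rest` capture above; the model slug and `topP` value are illustrative, and this assumes `rest` is ultimately forwarded to `generateText`, as the inline comment suggests:

const text = await generateOpenRouterText({
	apiKey: process.env.OPENROUTER_API_KEY,
	modelId: 'openai/gpt-4o',
	messages: [{ role: 'user', content: 'Give a one-line project status update.' }],
	maxTokens: 128,
	temperature: 0.2,
	topP: 0.9 // Picked up by ...rest
});
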
@@ -10,13 +10,13 @@ import { log } from '../../scripts/modules/utils.js';

// --- Client Instantiation ---
// Similar to Anthropic, this expects the resolved API key to be passed in.
function getClient(apiKey, baseUrl) {
function getClient(apiKey) {
	if (!apiKey) {
		throw new Error('Perplexity API key is required.');
	}
	// Create and return a new instance directly
	return createPerplexity({
		apiKey: apiKey,
		...(baseUrl && { baseURL: baseUrl })
		apiKey: apiKey
	});
}

@@ -31,7 +31,6 @@ function getClient(apiKey, baseUrl) {
 * @param {Array<object>} params.messages - The messages array.
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
 * @param {string} [params.baseUrl] - Base URL for the Perplexity API.
 * @returns {Promise<string>} The generated text content.
 * @throws {Error} If the API call fails.
 */
@@ -40,12 +39,11 @@ export async function generatePerplexityText({
	modelId,
	messages,
	maxTokens,
	temperature,
	baseUrl
	temperature
}) {
	log('debug', `Generating Perplexity text with model: ${modelId}`);
	try {
		const client = getClient(apiKey, baseUrl);
		const client = getClient(apiKey);
		const result = await generateText({
			model: client(modelId),
			messages: messages,
@@ -72,7 +70,6 @@ export async function generatePerplexityText({
 * @param {Array<object>} params.messages - The messages array.
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
 * @param {string} [params.baseUrl] - Base URL for the Perplexity API.
 * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
 * @throws {Error} If the API call fails to initiate the stream.
 */
@@ -81,12 +78,11 @@ export async function streamPerplexityText({
	modelId,
	messages,
	maxTokens,
	temperature,
	baseUrl
	temperature
}) {
	log('debug', `Streaming Perplexity text with model: ${modelId}`);
	try {
		const client = getClient(apiKey, baseUrl);
		const client = getClient(apiKey);
		const stream = await streamText({
			model: client(modelId),
			messages: messages,
@@ -116,7 +112,6 @@ export async function streamPerplexityText({
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
 * @param {number} [params.maxRetries] - Max retries for validation/generation.
 * @param {string} [params.baseUrl] - Base URL for the Perplexity API.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If generation or validation fails or is unsupported.
 */
@@ -128,8 +123,7 @@ export async function generatePerplexityObject({
	objectName = 'generated_object',
	maxTokens,
	temperature,
	maxRetries = 1,
	baseUrl
	maxRetries = 1 // Lower retries as support might be limited
}) {
	log(
		'debug',
@@ -140,7 +134,8 @@ export async function generatePerplexityObject({
		'generateObject support for Perplexity might be limited or experimental.'
	);
	try {
		const client = getClient(apiKey, baseUrl);
		const client = getClient(apiKey);
		// Attempt using generateObject, but be prepared for potential issues
		const result = await generateObject({
			model: client(modelId),
			schema: schema,

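The stream helpers in this comparison return the full `streamText` result object rather than just `result.textStream`, so callers consume the deltas themselves. A minimal consumer sketch; the model ID and prompt are placeholders, not project code:

const result = await streamPerplexityText({
	apiKey: process.env.PERPLEXITY_API_KEY,
	modelId: 'sonar-pro',
	messages: [{ role: 'user', content: 'List three open research questions.' }]
});

// textStream is the async-iterable delta stream on the Vercel AI SDK result object.
for await (const delta of result.textStream) {
	process.stdout.write(delta);
}
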
@@ -9,13 +9,14 @@ import { generateText, streamText, generateObject } from 'ai'; // Only import wh
import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible

// --- Client Instantiation ---
function getClient(apiKey, baseUrl) {
function getClient(apiKey) {
	if (!apiKey) {
		throw new Error('xAI API key is required.');
	}
	// Create and return a new instance directly
	return createXai({
		apiKey: apiKey,
		...(baseUrl && { baseURL: baseUrl })
		apiKey: apiKey
		// Add baseURL or other options if needed later
	});
}

@@ -30,7 +31,6 @@ function getClient(apiKey, baseUrl) {
 * @param {Array<object>} params.messages - The messages array (e.g., [{ role: 'user', content: '...' }]).
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
 * @param {string} [params.baseUrl] - The base URL for the xAI API.
 * @returns {Promise<string>} The generated text content.
 * @throws {Error} If the API call fails.
 */
@@ -39,14 +39,13 @@ export async function generateXaiText({
	modelId,
	messages,
	maxTokens,
	temperature,
	baseUrl
	temperature
}) {
	log('debug', `Generating xAI text with model: ${modelId}`);
	try {
		const client = getClient(apiKey, baseUrl);
		const client = getClient(apiKey);
		const result = await generateText({
			model: client(modelId),
			model: client(modelId), // Correct model invocation
			messages: messages,
			maxTokens: maxTokens,
			temperature: temperature
@@ -71,7 +70,6 @@ export async function generateXaiText({
 * @param {Array<object>} params.messages - The messages array.
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
 * @param {string} [params.baseUrl] - The base URL for the xAI API.
 * @returns {Promise<object>} The full stream result object from the Vercel AI SDK.
 * @throws {Error} If the API call fails to initiate the stream.
 */
@@ -80,19 +78,18 @@ export async function streamXaiText({
	modelId,
	messages,
	maxTokens,
	temperature,
	baseUrl
	temperature
}) {
	log('debug', `Streaming xAI text with model: ${modelId}`);
	try {
		const client = getClient(apiKey, baseUrl);
		const client = getClient(apiKey);
		const stream = await streamText({
			model: client(modelId),
			model: client(modelId), // Correct model invocation
			messages: messages,
			maxTokens: maxTokens,
			temperature: temperature
		});
		return stream;
		return stream; // Return the full stream object
	} catch (error) {
		log('error', `xAI streamText failed: ${error.message}`, error.stack);
		throw error;
@@ -113,7 +110,6 @@ export async function streamXaiText({
 * @param {number} [params.maxTokens] - Maximum tokens for the response.
 * @param {number} [params.temperature] - Temperature for generation.
 * @param {number} [params.maxRetries] - Max retries for validation/generation.
 * @param {string} [params.baseUrl] - The base URL for the xAI API.
 * @returns {Promise<object>} The generated object matching the schema.
 * @throws {Error} If generation or validation fails.
 */
@@ -125,17 +121,16 @@ export async function generateXaiObject({
	objectName = 'generated_xai_object',
	maxTokens,
	temperature,
	maxRetries = 3,
	baseUrl
	maxRetries = 3
}) {
	log(
		'warn',
		'warn', // Log warning as this is likely unsupported
		`Attempting to generate xAI object ('${objectName}') with model: ${modelId}. This may not be supported by the provider.`
	);
	try {
		const client = getClient(apiKey, baseUrl);
		const client = getClient(apiKey);
		const result = await generateObject({
			model: client(modelId),
			model: client(modelId), // Correct model invocation
			// Note: mode might need adjustment if xAI ever supports object generation differently
			mode: 'tool',
			schema: schema,
@@ -158,6 +153,6 @@ export async function generateXaiObject({
			'error',
			`xAI generateObject ('${objectName}') failed: ${error.message}. (Likely unsupported by provider)`
		);
		throw error;
		throw error; // Re-throw the error
	}
}

@@ -1,32 +0,0 @@
/**
 * @typedef {'pending' | 'done' | 'in-progress' | 'review' | 'deferred' | 'cancelled'} TaskStatus
 */

/**
 * Task status options list
 * @type {TaskStatus[]}
 * @description Defines possible task statuses:
 * - pending: Task waiting to start
 * - done: Task completed
 * - in-progress: Task in progress
 * - review: Task completed and waiting for review
 * - deferred: Task postponed or paused
 * - cancelled: Task cancelled and will not be completed
 */
export const TASK_STATUS_OPTIONS = [
	'pending',
	'done',
	'in-progress',
	'review',
	'deferred',
	'cancelled'
];

/**
 * Check if a given status is a valid task status
 * @param {string} status - The status to check
 * @returns {boolean} True if the status is valid, false otherwise
 */
export function isValidTaskStatus(status) {
	return TASK_STATUS_OPTIONS.includes(status);
}
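For reference, the validation helper above is a plain membership check, so status strings are case-sensitive; a small usage sketch with illustrative values:

isValidTaskStatus('in-progress'); // true
isValidTaskStatus('Done'); // false, statuses in TASK_STATUS_OPTIONS are lowercase
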
@@ -3,8 +3,9 @@
 */

import { jest } from '@jest/globals';
import path, { dirname } from 'path';
import path from 'path';
import { fileURLToPath } from 'url';
import { dirname } from 'path';

// Get the current module's directory
const __filename = fileURLToPath(import.meta.url);
@@ -26,7 +27,6 @@ const mockReadJSON = jest.fn();
const mockWriteJSON = jest.fn();
const mockEnableSilentMode = jest.fn();
const mockDisableSilentMode = jest.fn();
const mockReadComplexityReport = jest.fn().mockReturnValue(null);

const mockGetAnthropicClient = jest.fn().mockReturnValue({});
const mockGetConfiguredAnthropicClient = jest.fn().mockReturnValue({});
@@ -130,7 +130,6 @@ jest.mock('../../../scripts/modules/utils.js', () => ({
	writeJSON: mockWriteJSON,
	enableSilentMode: mockEnableSilentMode,
	disableSilentMode: mockDisableSilentMode,
	readComplexityReport: mockReadComplexityReport,
	CONFIG: {
		model: 'claude-3-7-sonnet-20250219',
		maxTokens: 64000,
@@ -161,6 +160,15 @@ jest.mock('../../../scripts/modules/task-manager.js', () => ({
}));

// Import dependencies after mocks are set up
import fs from 'fs';
import {
	readJSON,
	writeJSON,
	enableSilentMode,
	disableSilentMode
} from '../../../scripts/modules/utils.js';
import { expandTask } from '../../../scripts/modules/task-manager.js';
import { findTasksJsonPath } from '../../../mcp-server/src/core/utils/path-utils.js';
import { sampleTasks } from '../../fixtures/sample-tasks.js';

// Mock logger
@@ -212,37 +220,6 @@ describe('MCP Server Direct Functions', () => {
	});

	describe('listTasksDirect', () => {
		// Sample complexity report for testing
		const mockComplexityReport = {
			meta: {
				generatedAt: '2025-03-24T20:01:35.986Z',
				tasksAnalyzed: 3,
				thresholdScore: 5,
				projectName: 'Test Project',
				usedResearch: false
			},
			complexityAnalysis: [
				{
					taskId: 1,
					taskTitle: 'Initialize Project',
					complexityScore: 3,
					recommendedSubtasks: 2
				},
				{
					taskId: 2,
					taskTitle: 'Create Core Functionality',
					complexityScore: 8,
					recommendedSubtasks: 5
				},
				{
					taskId: 3,
					taskTitle: 'Implement UI Components',
					complexityScore: 6,
					recommendedSubtasks: 4
				}
			]
		};

		// Test wrapper function that doesn't rely on the actual implementation
		async function testListTasks(args, mockLogger) {
			// File not found case
@@ -258,35 +235,21 @@ describe('MCP Server Direct Functions', () => {
				};
			}

			// Check for complexity report
			const complexityReport = mockReadComplexityReport();
			let tasksData = [...sampleTasks.tasks];

			// Add complexity scores if report exists
			if (complexityReport && complexityReport.complexityAnalysis) {
				tasksData = tasksData.map((task) => {
					const analysis = complexityReport.complexityAnalysis.find(
						(a) => a.taskId === task.id
					);
					if (analysis) {
						return { ...task, complexityScore: analysis.complexityScore };
					}
					return task;
				});
			}

			// Success case
			if (!args.status && !args.withSubtasks) {
				return {
					success: true,
					data: {
						tasks: tasksData,
						tasks: sampleTasks.tasks,
						stats: {
							total: tasksData.length,
							completed: tasksData.filter((t) => t.status === 'done').length,
							inProgress: tasksData.filter((t) => t.status === 'in-progress')
							total: sampleTasks.tasks.length,
							completed: sampleTasks.tasks.filter((t) => t.status === 'done')
								.length,
							pending: tasksData.filter((t) => t.status === 'pending').length
							inProgress: sampleTasks.tasks.filter(
								(t) => t.status === 'in-progress'
							).length,
							pending: sampleTasks.tasks.filter((t) => t.status === 'pending')
								.length
						}
					},
					fromCache: false
@@ -295,14 +258,16 @@ describe('MCP Server Direct Functions', () => {

			// Status filter case
			if (args.status) {
				const filteredTasks = tasksData.filter((t) => t.status === args.status);
				const filteredTasks = sampleTasks.tasks.filter(
					(t) => t.status === args.status
				);
				return {
					success: true,
					data: {
						tasks: filteredTasks,
						filter: args.status,
						stats: {
							total: tasksData.length,
							total: sampleTasks.tasks.length,
							filtered: filteredTasks.length
						}
					},
@@ -315,10 +280,10 @@ describe('MCP Server Direct Functions', () => {
			return {
				success: true,
				data: {
					tasks: tasksData,
					tasks: sampleTasks.tasks,
					includeSubtasks: true,
					stats: {
						total: tasksData.length
						total: sampleTasks.tasks.length
					}
				},
				fromCache: false
@@ -405,29 +370,6 @@ describe('MCP Server Direct Functions', () => {
			expect(result.error.code).toBe('FILE_NOT_FOUND_ERROR');
			expect(mockLogger.error).toHaveBeenCalled();
		});

		test('should include complexity scores when complexity report exists', async () => {
			// Arrange
			mockReadComplexityReport.mockReturnValueOnce(mockComplexityReport);
			const args = {
				projectRoot: testProjectRoot,
				file: testTasksPath,
				withSubtasks: true
			};

			// Act
			const result = await testListTasks(args, mockLogger);
			// Assert
			expect(result.success).toBe(true);

			// Check that tasks have complexity scores from the report
			mockComplexityReport.complexityAnalysis.forEach((analysis) => {
				const task = result.data.tasks.find((t) => t.id === analysis.taskId);
				if (task) {
					expect(task.complexityScore).toBe(analysis.complexityScore);
				}
			});
		});
	});

	describe('expandTaskDirect', () => {

@@ -8,7 +8,6 @@ const mockGetResearchModelId = jest.fn();
const mockGetFallbackProvider = jest.fn();
const mockGetFallbackModelId = jest.fn();
const mockGetParametersForRole = jest.fn();
const mockGetBaseUrlForRole = jest.fn();

jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
	getMainProvider: mockGetMainProvider,
@@ -17,8 +16,7 @@ jest.unstable_mockModule('../../scripts/modules/config-manager.js', () => ({
	getResearchModelId: mockGetResearchModelId,
	getFallbackProvider: mockGetFallbackProvider,
	getFallbackModelId: mockGetFallbackModelId,
	getParametersForRole: mockGetParametersForRole,
	getBaseUrlForRole: mockGetBaseUrlForRole
	getParametersForRole: mockGetParametersForRole
}));

// Mock AI Provider Modules

@@ -2,9 +2,8 @@
 * Task finder tests
 */

// Import after mocks are set up - No mocks needed for readComplexityReport anymore
import { findTaskById } from '../../scripts/modules/utils.js';
import { emptySampleTasks, sampleTasks } from '../fixtures/sample-tasks.js';
import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';

describe('Task Finder', () => {
	describe('findTaskById function', () => {
@@ -56,62 +55,5 @@ describe('Task Finder', () => {
			expect(result.task).toBeNull();
			expect(result.originalSubtaskCount).toBeNull();
		});
		test('should work correctly when no complexity report is provided', () => {
			// Pass null as the complexity report
			const result = findTaskById(sampleTasks.tasks, 2, null);

			expect(result.task).toBeDefined();
			expect(result.task.id).toBe(2);
			expect(result.task.complexityScore).toBeUndefined();
		});
		test('should work correctly when task has no complexity data in the provided report', () => {
			// Define a complexity report that doesn't include task 2
			const complexityReport = {
				complexityAnalysis: [{ taskId: 999, complexityScore: 5 }]
			};

			const result = findTaskById(sampleTasks.tasks, 2, complexityReport);

			expect(result.task).toBeDefined();
			expect(result.task.id).toBe(2);
			expect(result.task.complexityScore).toBeUndefined();
		});

		test('should include complexity score when report is provided', () => {
			// Define the complexity report for this test
			const complexityReport = {
				meta: {
					generatedAt: '2023-01-01T00:00:00.000Z',
					tasksAnalyzed: 3,
					thresholdScore: 5
				},
				complexityAnalysis: [
					{
						taskId: 1,
						taskTitle: 'Initialize Project',
						complexityScore: 3,
						recommendedSubtasks: 2
					},
					{
						taskId: 2,
						taskTitle: 'Create Core Functionality',
						complexityScore: 8,
						recommendedSubtasks: 5
					},
					{
						taskId: 3,
						taskTitle: 'Implement UI Components',
						complexityScore: 6,
						recommendedSubtasks: 4
					}
				]
			};

			const result = findTaskById(sampleTasks.tasks, 2, complexityReport);

			expect(result.task).toBeDefined();
			expect(result.task.id).toBe(2);
			expect(result.task.complexityScore).toBe(8);
		});
	});
});

@@ -199,12 +199,6 @@ const testSetTaskStatus = (tasksData, taskIdInput, newStatus) => {

// Simplified version of updateSingleTaskStatus for testing
const testUpdateSingleTaskStatus = (tasksData, taskIdInput, newStatus) => {
	if (!isValidTaskStatus(newStatus)) {
		throw new Error(
			`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
		);
	}

	// Check if it's a subtask (e.g., "1.2")
	if (taskIdInput.includes('.')) {
		const [parentId, subtaskId] = taskIdInput
@@ -335,10 +329,6 @@ const testAddTask = (
import * as taskManager from '../../scripts/modules/task-manager.js';
import { sampleClaudeResponse } from '../fixtures/sample-claude-response.js';
import { sampleTasks, emptySampleTasks } from '../fixtures/sample-tasks.js';
import {
	isValidTaskStatus,
	TASK_STATUS_OPTIONS
} from '../../src/constants/task-status.js';

// Destructure the required functions for convenience
const { findNextTask, generateTaskFiles, clearSubtasks, updateTaskById } =
@@ -1175,16 +1165,6 @@ describe('Task Manager Module', () => {
			expect(testTasksData.tasks[1].status).toBe('done');
		});

		test('should throw error for invalid status', async () => {
			// Arrange
			const testTasksData = JSON.parse(JSON.stringify(sampleTasks));

			// Assert
			expect(() =>
				testUpdateSingleTaskStatus(testTasksData, '2', 'Done')
			).toThrow(/Error: Invalid status value: Done./);
		});

		test('should update subtask status', async () => {
			// Arrange
			const testTasksData = JSON.parse(JSON.stringify(sampleTasks));
