feat(config): Implement new config system and resolve refactoring errors

Introduced config-manager.js and new utilities (resolveEnvVariable, findProjectRoot). Removed the old global CONFIG object from utils.js. Updated .taskmasterconfig, mcp.json, and .env.example. Added generateComplexityAnalysisPrompt to ui.js. Removed the unused updateSubtaskById from task-manager.js.

Resolved SyntaxError and ReferenceError issues across commands.js, ui.js, task-manager.js, and ai-services.js by replacing CONFIG references with config-manager getters (getDebugFlag, getProjectName, getDefaultSubtasks, isApiKeySet). Refactored the 'models' command to use getConfig/writeConfig. Simplified version checking.

This stabilizes the codebase after the initial Task 61 refactoring, fixing CLI errors and enabling subsequent work on Subtasks 61.34 and 61.35.

Eyal Toledano
2025-04-20 01:09:30 -04:00
parent 845f8009ef
commit 292dd51417
16 changed files with 3454 additions and 797 deletions
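
The heart of the refactor is mechanical: every read of the removed global CONFIG object becomes a call to a config-manager getter, which lazily loads and caches .taskmasterconfig. A minimal before/after sketch of the pattern (these call sites are illustrative, not an exhaustive list):

// Before: modules read a mutable global exported from utils.js
// import { CONFIG } from './utils.js';
// if (CONFIG.debug) { ... }

// After: modules ask config-manager.js; the null argument means
// "discover the project root" rather than using an explicit path.
import { getDebugFlag, getProjectName, getDefaultSubtasks } from './config-manager.js';

if (getDebugFlag(null)) {
	console.error(`Debugging ${getProjectName(null)}`);
}
const subtaskCount = getDefaultSubtasks(null); // parsed to an integer, defaults to 5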

mcp.json

@@ -4,14 +4,14 @@
 		"command": "node",
 		"args": ["./mcp-server/server.js"],
 		"env": {
-			"ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE",
-			"PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE",
-			"MODEL": "claude-3-7-sonnet-20250219",
-			"PERPLEXITY_MODEL": "sonar-pro",
-			"MAX_TOKENS": 64000,
-			"TEMPERATURE": 0.2,
-			"DEFAULT_SUBTASKS": 5,
-			"DEFAULT_PRIORITY": "medium"
+			"ANTHROPIC_API_KEY": "sk-ant-api03-[REDACTED]",
+			"PERPLEXITY_API_KEY": "pplx-1234567890",
+			"OPENAI_API_KEY": "sk-proj-1234567890",
+			"GOOGLE_API_KEY": "AIzaSyB1234567890",
+			"GROK_API_KEY": "gsk_1234567890",
+			"MISTRAL_API_KEY": "mst_1234567890",
+			"AZURE_OPENAI_API_KEY": "1234567890",
+			"AZURE_OPENAI_ENDPOINT": "https://your-endpoint.openai.azure.com/"
 		}
 	}
 }

.taskmasterconfig

@@ -2,15 +2,29 @@
"models": { "models": {
"main": { "main": {
"provider": "google", "provider": "google",
"modelId": "gemini-2.5-pro-latest" "modelId": "gemini-2.5-pro-latest",
"maxTokens": 256000,
"temperature": 0.2
}, },
"research": { "research": {
"provider": "perplexity", "provider": "perplexity",
"modelId": "deep-research" "modelId": "sonar-pro",
"maxTokens": 8700,
"temperature": 0.1
}, },
"fallback": { "fallback": {
"provider": "anthropic", "provider": "anthropic",
"modelId": "claude-3-7-sonnet-20250219" "modelId": "claude-3-7-sonnet-20250219",
} "maxTokens": 128000,
"temperature": 0.2
}
},
"global": {
"logLevel": "info",
"debug": false,
"defaultSubtasks": 5,
"defaultPriority": "medium",
"projectName": "Task Master",
"ollamaBaseUrl": "http://localhost:11434/api"
} }
} }

.env.example

@@ -1,9 +1,12 @@
# Required # API Keys (Required to enable respective provider)
ANTHROPIC_API_KEY=your-api-key-here # For most AI ops -- Format: sk-ant-api03-... (Required) ANTHROPIC_API_KEY=your_anthropic_api_key_here # Required: Format: sk-ant-api03-...
PERPLEXITY_API_KEY=pplx-abcde # For research -- Format: pplx-abcde (Optional, Highly Recommended) PERPLEXITY_API_KEY=your_perplexity_api_key_here # Optional: Format: pplx-...
OPENAI_API_KEY=sk-proj-... # For OpenAI/OpenRouter models (Optional) -- Format: sk-proj-... OPENAI_API_KEY=your_openai_api_key_here # Optional, for OpenAI/OpenRouter models. Format: sk-proj-...
GOOGLE_API_KEY=AIzaSy... # For Google Gemini models (Optional) GOOGLE_API_KEY=your_google_api_key_here # Optional, for Google Gemini models.
GROK_API_KEY=your-grok-api-key-here # For XAI Grok models (Optional) GROK_API_KEY=your_grok_api_key_here # Optional, for XAI Grok models.
MISTRAL_API_KEY=your_mistral_key_here # Optional, for Mistral AI models.
AZURE_OPENAI_API_KEY=your_azure_key_here # Optional, for Azure OpenAI models.
AZURE_OPENAI_ENDPOINT=your_azure_endpoint_here # Optional, for Azure OpenAI.
# Optional - defaults shown # Optional - defaults shown
MODEL=claude-3-7-sonnet-20250219 # Recommended models: claude-3-7-sonnet-20250219, claude-3-opus-20240229 (Required) MODEL=claude-3-7-sonnet-20250219 # Recommended models: claude-3-7-sonnet-20250219, claude-3-opus-20240229 (Required)
@@ -15,3 +18,4 @@ LOG_LEVEL=info # Log level (debug, info, warn, error)
DEFAULT_SUBTASKS=5 # Default number of subtasks when expanding DEFAULT_SUBTASKS=5 # Default number of subtasks when expanding
DEFAULT_PRIORITY=medium # Default priority for generated tasks (high, medium, low) DEFAULT_PRIORITY=medium # Default priority for generated tasks (high, medium, low)
PROJECT_NAME={{projectName}} # Project name for tasks.json metadata PROJECT_NAME={{projectName}} # Project name for tasks.json metadata
OLLAMA_BASE_URL=http://localhost:11434/api # Base URL for local Ollama instance (Optional)
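
The commit message also introduces a resolveEnvVariable utility in utils.js, used by isApiKeySet further down to honor keys supplied either through the shell or through an MCP session. A hypothetical sketch of such a helper, assuming the session's env takes precedence (the real lookup order in utils.js may differ):

// Sketch only; the actual implementation lives in utils.js.
function resolveEnvVariable(varName, session = null) {
	// Prefer a value provided by the MCP session, if any...
	if (session?.env?.[varName]) {
		return session.env[varName];
	}
	// ...otherwise fall back to the process environment.
	return process.env[varName] || undefined;
}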

ai-services-unified.js (new file)

@@ -0,0 +1,368 @@
/**
* ai-services-unified.js
* Centralized AI service layer using ai-client-factory and AI SDK core functions.
*/
import { generateText, streamText, generateObject } from 'ai'; // streamText and generateObject are used by the services below
import { getClient } from './ai-client-factory.js';
import { log } from './utils.js'; // Logger, also used for retry logging
// --- Configuration for Retries ---
const MAX_RETRIES = 2; // Total attempts = 1 + MAX_RETRIES
const INITIAL_RETRY_DELAY_MS = 1000; // 1 second
// Helper function to check if an error is retryable
function isRetryableError(error) {
const errorMessage = error.message?.toLowerCase() || '';
// Add common retryable error patterns
return (
errorMessage.includes('rate limit') ||
errorMessage.includes('overloaded') ||
errorMessage.includes('service temporarily unavailable') ||
errorMessage.includes('timeout') ||
errorMessage.includes('network error') ||
// Add specific status codes if available from the SDK errors
error.status === 429 || // Too Many Requests
error.status >= 500 // Server-side errors
);
}
/**
* Internal helper to attempt an AI SDK API call with retries.
*
* @param {object} client - The AI client instance.
* @param {function} apiCallFn - The AI SDK function to call (e.g., generateText).
* @param {object} apiParams - Parameters for the AI SDK function (excluding model).
* @param {string} attemptRole - The role being attempted (for logging).
* @returns {Promise<object>} The result from the successful API call.
* @throws {Error} If the call fails after all retries.
*/
async function _attemptApiCallWithRetries(
client,
apiCallFn,
apiParams,
attemptRole
) {
let retries = 0;
while (retries <= MAX_RETRIES) {
try {
log(
'info',
`Attempt ${retries + 1}/${MAX_RETRIES + 1} calling ${apiCallFn.name} for role ${attemptRole}`
);
// Call the provided AI SDK function (generateText, streamText, etc.)
const result = await apiCallFn({ model: client, ...apiParams });
log(
'info',
`${apiCallFn.name} succeeded for role ${attemptRole} on attempt ${retries + 1}`
);
return result; // Success!
} catch (error) {
log(
'warn',
`Attempt ${retries + 1} failed for role ${attemptRole} (${apiCallFn.name}): ${error.message}`
);
if (isRetryableError(error) && retries < MAX_RETRIES) {
retries++;
const delay = INITIAL_RETRY_DELAY_MS * Math.pow(2, retries - 1);
log(
'info',
`Retryable error detected. Retrying in ${delay / 1000}s...`
);
await new Promise((resolve) => setTimeout(resolve, delay));
} else {
log(
'error',
`Non-retryable error or max retries reached for role ${attemptRole} (${apiCallFn.name}).`
);
throw error; // Final failure for this attempt chain
}
}
}
// Should theoretically not be reached due to throw in the else block, but needed for linting/type safety
throw new Error(
`Exhausted all retries for role ${attemptRole} (${apiCallFn.name})`
);
}
/**
* Unified service function for generating text.
* Handles client retrieval, retries, and fallback (main -> fallback -> research).
* TODO: Add detailed logging.
*
* @param {object} params - Parameters for the service call.
* @param {string} params.role - The initial client role ('main', 'research', 'fallback').
* @param {object} [params.session=null] - Optional MCP session object.
* @param {object} [params.overrideOptions={}] - Optional overrides for ai-client-factory { provider, modelId }.
* @param {string} params.prompt - The prompt for the AI.
* @param {number} [params.maxTokens] - Max tokens for the generation.
* @param {number} [params.temperature] - Temperature setting.
* // ... include other standard generateText options as needed ...
* @returns {Promise<object>} The result from the AI SDK's generateText function.
*/
async function generateTextService(params) {
const {
role: initialRole,
session,
overrideOptions,
...generateTextParams
} = params;
log('info', 'generateTextService called', { role: initialRole });
// Determine the sequence explicitly based on the initial role
let sequence;
if (initialRole === 'main') {
sequence = ['main', 'fallback', 'research'];
} else if (initialRole === 'fallback') {
sequence = ['fallback', 'research']; // Try fallback, then research
} else if (initialRole === 'research') {
sequence = ['research', 'fallback']; // Try research, then fallback
} else {
// Default sequence if initialRole is unknown or invalid
log(
'warn',
`Unknown initial role: ${initialRole}. Defaulting to main -> fallback -> research sequence.`
);
sequence = ['main', 'fallback', 'research'];
}
let lastError = null;
// Iterate through the determined sequence
for (const currentRole of sequence) {
// Removed the complex conditional check, as the sequence is now pre-determined
log('info', `Attempting service call with role: ${currentRole}`);
let client;
try {
client = await getClient(currentRole, session, overrideOptions);
const clientInfo = {
provider: client?.provider || 'unknown',
model: client?.modelId || client?.model || 'unknown'
};
log('info', 'Retrieved AI client', clientInfo);
// Attempt the API call with retries using the helper
const result = await _attemptApiCallWithRetries(
client,
generateText,
generateTextParams,
currentRole
);
log('info', `generateTextService succeeded using role: ${currentRole}`); // Add success log
return result; // Success!
} catch (error) {
log(
'error', // Log as error since this role attempt failed
`Service call failed for role ${currentRole}: ${error.message}`
);
lastError = error; // Store the error to throw if all roles in sequence fail
// Log the reason for moving to the next role
if (!client) {
log(
'warn',
`Could not get client for role ${currentRole}, trying next role in sequence...`
);
} else {
// Error happened during API call after client was retrieved
log(
'warn',
`Retries exhausted or non-retryable error for role ${currentRole}, trying next role in sequence...`
);
}
// Continue to the next role in the sequence automatically
}
}
// If loop completes, all roles in the sequence failed
log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`);
throw (
lastError ||
new Error(
'AI service call failed for all configured roles in the sequence.'
)
);
}
// The streaming and object variants below follow the same sequence/retry pattern.
/**
* Unified service function for streaming text.
* Handles client retrieval, retries, and fallback sequence.
*
* @param {object} params - Parameters for the service call.
* @param {string} params.role - The initial client role ('main', 'research', 'fallback').
* @param {object} [params.session=null] - Optional MCP session object.
* @param {object} [params.overrideOptions={}] - Optional overrides for ai-client-factory.
* @param {string} params.prompt - The prompt for the AI.
* // ... include other standard streamText options as needed ...
* @returns {Promise<object>} The result from the AI SDK's streamText function (typically a Streamable object).
*/
async function streamTextService(params) {
const {
role: initialRole,
session,
overrideOptions,
...streamTextParams // Collect remaining params for streamText
} = params;
log('info', 'streamTextService called', { role: initialRole });
let sequence;
if (initialRole === 'main') {
sequence = ['main', 'fallback', 'research'];
} else if (initialRole === 'fallback') {
sequence = ['fallback', 'research'];
} else if (initialRole === 'research') {
sequence = ['research', 'fallback'];
} else {
log(
'warn',
`Unknown initial role: ${initialRole}. Defaulting to main -> fallback -> research sequence.`
);
sequence = ['main', 'fallback', 'research'];
}
let lastError = null;
for (const currentRole of sequence) {
log('info', `Attempting service call with role: ${currentRole}`);
let client;
try {
client = await getClient(currentRole, session, overrideOptions);
const clientInfo = {
provider: client?.provider || 'unknown',
model: client?.modelId || client?.model || 'unknown'
};
log('info', 'Retrieved AI client', clientInfo);
const result = await _attemptApiCallWithRetries(
client,
streamText, // Pass streamText function
streamTextParams,
currentRole
);
log('info', `streamTextService succeeded using role: ${currentRole}`);
return result;
} catch (error) {
log(
'error',
`Service call failed for role ${currentRole}: ${error.message}`
);
lastError = error;
if (!client) {
log(
'warn',
`Could not get client for role ${currentRole}, trying next role in sequence...`
);
} else {
log(
'warn',
`Retries exhausted or non-retryable error for role ${currentRole}, trying next role in sequence...`
);
}
}
}
log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`);
throw (
lastError ||
new Error(
'AI service call (streamText) failed for all configured roles in the sequence.'
)
);
}
/**
* Unified service function for generating structured objects.
* Handles client retrieval, retries, and fallback sequence.
*
* @param {object} params - Parameters for the service call.
* @param {string} params.role - The initial client role ('main', 'research', 'fallback').
* @param {object} [params.session=null] - Optional MCP session object.
* @param {object} [params.overrideOptions={}] - Optional overrides for ai-client-factory.
* @param {z.Schema} params.schema - The Zod schema for the expected object.
* @param {string} params.prompt - The prompt for the AI.
* // ... include other standard generateObject options as needed ...
* @returns {Promise<object>} The result from the AI SDK's generateObject function.
*/
async function generateObjectService(params) {
const {
role: initialRole,
session,
overrideOptions,
...generateObjectParams // Collect remaining params for generateObject
} = params;
log('info', 'generateObjectService called', { role: initialRole });
let sequence;
if (initialRole === 'main') {
sequence = ['main', 'fallback', 'research'];
} else if (initialRole === 'fallback') {
sequence = ['fallback', 'research'];
} else if (initialRole === 'research') {
sequence = ['research', 'fallback'];
} else {
log(
'warn',
`Unknown initial role: ${initialRole}. Defaulting to main -> fallback -> research sequence.`
);
sequence = ['main', 'fallback', 'research'];
}
let lastError = null;
for (const currentRole of sequence) {
log('info', `Attempting service call with role: ${currentRole}`);
let client;
try {
client = await getClient(currentRole, session, overrideOptions);
const clientInfo = {
provider: client?.provider || 'unknown',
model: client?.modelId || client?.model || 'unknown'
};
log('info', 'Retrieved AI client', clientInfo);
const result = await _attemptApiCallWithRetries(
client,
generateObject, // Pass generateObject function
generateObjectParams,
currentRole
);
log('info', `generateObjectService succeeded using role: ${currentRole}`);
return result;
} catch (error) {
log(
'error',
`Service call failed for role ${currentRole}: ${error.message}`
);
lastError = error;
if (!client) {
log(
'warn',
`Could not get client for role ${currentRole}, trying next role in sequence...`
);
} else {
log(
'warn',
`Retries exhausted or non-retryable error for role ${currentRole}, trying next role in sequence...`
);
}
}
}
log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`);
throw (
lastError ||
new Error(
'AI service call (generateObject) failed for all configured roles in the sequence.'
)
);
}
export { generateTextService, streamTextService, generateObjectService };
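
A hedged usage sketch for the new service layer; the prompt text and token budget below are illustrative and not taken from the repository:

import { generateTextService } from './ai-services-unified.js';

// Ask the 'main' role first; on failure the service walks the rest of
// the sequence (main -> fallback -> research) and retries within each role.
const result = await generateTextService({
	role: 'main',
	prompt: 'Summarize the open tasks in three bullet points.',
	maxTokens: 1024,
	temperature: 0.2
});
console.log(result.text); // generateText results expose the generated text on .text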

ai-services.js

@@ -8,9 +8,18 @@
 import { Anthropic } from '@anthropic-ai/sdk';
 import OpenAI from 'openai';
 import dotenv from 'dotenv';
-import { CONFIG, log, sanitizePrompt, isSilentMode } from './utils.js';
+import { log, sanitizePrompt, isSilentMode } from './utils.js';
 import { startLoadingIndicator, stopLoadingIndicator } from './ui.js';
 import chalk from 'chalk';
+import {
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature,
+	getDebugFlag,
+	getResearchModelId,
+	getResearchMaxTokens,
+	getResearchTemperature
+} from './config-manager.js';
 
 // Load environment variables
 dotenv.config();
@@ -218,7 +227,7 @@ Important: Your response must be valid JSON only, with no additional explanation
 			prdContent,
 			prdPath,
 			numTasks,
-			modelConfig?.maxTokens || CONFIG.maxTokens,
+			modelConfig?.maxTokens || getMainMaxTokens(null),
 			systemPrompt,
 			{ reportProgress, mcpLog, session },
 			aiClient || anthropic,
@@ -254,7 +263,7 @@ Important: Your response must be valid JSON only, with no additional explanation
 			);
 		} else {
 			console.error(chalk.red(userMessage));
-			if (CONFIG.debug) {
+			if (getDebugFlag(null)) {
 				log('debug', 'Full error:', error);
 			}
 			throw new Error(userMessage);
@@ -287,54 +296,46 @@ async function handleStreamingRequest(
 	aiClient = null,
 	modelConfig = null
 ) {
-	// Determine output format based on mcpLog presence
-	const outputFormat = mcpLog ? 'json' : 'text';
-	// Create custom reporter that checks for MCP log and silent mode
 	const report = (message, level = 'info') => {
-		if (mcpLog) {
+		if (mcpLog && typeof mcpLog[level] === 'function') {
 			mcpLog[level](message);
-		} else if (!isSilentMode() && outputFormat === 'text') {
-			// Only log to console if not in silent mode and outputFormat is 'text'
+		} else if (!isSilentMode()) {
 			log(level, message);
 		}
 	};
-	// Only show loading indicators for text output (CLI)
-	let loadingIndicator = null;
-	if (outputFormat === 'text' && !isSilentMode()) {
-		loadingIndicator = startLoadingIndicator('Generating tasks from PRD...');
+	let loadingIndicator;
+	if (!isSilentMode() && !mcpLog) {
+		loadingIndicator = startLoadingIndicator('Claude is thinking...');
 	}
-	if (reportProgress) {
-		await reportProgress({ progress: 0 });
-	}
-	let responseText = '';
-	let streamingInterval = null;
+	let textContent = '';
+	let finalResponse = null;
+	let claudeOverloaded = false;
 	try {
-		// Use streaming for handling large responses
-		const stream = await (aiClient || anthropic).messages.create({
-			model:
-				modelConfig?.model || session?.env?.ANTHROPIC_MODEL || CONFIG.model,
-			max_tokens:
-				modelConfig?.maxTokens || session?.env?.MAX_TOKENS || maxTokens,
-			temperature:
-				modelConfig?.temperature ||
-				session?.env?.TEMPERATURE ||
-				CONFIG.temperature,
+		const modelToUse = modelConfig?.modelId || getMainModelId(null);
+		const temperatureToUse =
+			modelConfig?.temperature || getMainTemperature(null);
+		const clientToUse = aiClient || anthropic;
+
+		report(`Using model: ${modelToUse} with temp: ${temperatureToUse}`);
+
+		const stream = await clientToUse.messages.stream({
+			model: modelToUse,
+			max_tokens: maxTokens,
+			temperature: temperatureToUse,
 			system: systemPrompt,
 			messages: [
 				{
 					role: 'user',
 					content: `Here's the Product Requirements Document (PRD) to break down into ${numTasks} tasks:\n\n${prdContent}`
 				}
-			],
-			stream: true
+			]
 		});
-		// Update loading indicator to show streaming progress - only for text output
-		if (outputFormat === 'text' && !isSilentMode()) {
+		let streamingInterval = null;
+		if (!isSilentMode() && process.stdout.isTTY) {
 			let dotCount = 0;
 			const readline = await import('readline');
 			streamingInterval = setInterval(() => {
@@ -346,64 +347,76 @@ async function handleStreamingRequest(
 			}, 500);
 		}
-		// Process the stream
 		for await (const chunk of stream) {
 			if (chunk.type === 'content_block_delta' && chunk.delta.text) {
-				responseText += chunk.delta.text;
+				textContent += chunk.delta.text;
 			}
 			if (reportProgress) {
 				await reportProgress({
-					progress: (responseText.length / maxTokens) * 100
+					progress: (textContent.length / maxTokens) * 100
 				});
 			}
 			if (mcpLog) {
-				mcpLog.info(`Progress: ${(responseText.length / maxTokens) * 100}%`);
+				mcpLog.info(`Progress: ${(textContent.length / maxTokens) * 100}%`);
 			}
 		}
 		if (streamingInterval) clearInterval(streamingInterval);
-		// Only call stopLoadingIndicator if we started one
-		if (loadingIndicator && outputFormat === 'text' && !isSilentMode()) {
-			stopLoadingIndicator(loadingIndicator);
+		if (loadingIndicator) {
+			stopLoadingIndicator(
+				loadingIndicator,
+				'Claude processing finished',
+				true
+			);
+			loadingIndicator = null;
 		}
-		report(
-			`Completed streaming response from ${aiClient ? 'provided' : 'default'} AI client!`,
-			'info'
-		);
-		// Pass options to processClaudeResponse
-		return processClaudeResponse(
-			responseText,
+		finalResponse = processClaudeResponse(
+			textContent,
 			numTasks,
 			0,
 			prdContent,
 			prdPath,
 			{ reportProgress, mcpLog, session }
 		);
+		if (claudeOverloaded) {
+			report('Claude is overloaded, falling back to Perplexity', 'warn');
+			const perplexityClient = getPerplexityClient();
+			finalResponse = await handleStreamingRequest(
+				prdContent,
+				prdPath,
+				numTasks,
+				maxTokens,
+				systemPrompt,
+				{ reportProgress, mcpLog, session },
+				perplexityClient,
+				modelConfig
+			);
+		}
+		return finalResponse;
 	} catch (error) {
-		if (streamingInterval) clearInterval(streamingInterval);
-		// Only call stopLoadingIndicator if we started one
-		if (loadingIndicator && outputFormat === 'text' && !isSilentMode()) {
-			stopLoadingIndicator(loadingIndicator);
+		if (loadingIndicator) {
+			stopLoadingIndicator(loadingIndicator, 'Claude stream failed', false);
+			loadingIndicator = null;
 		}
-		// Get user-friendly error message
+		if (error.error?.type === 'overloaded_error') {
+			claudeOverloaded = true;
+		}
 		const userMessage = handleClaudeError(error);
-		report(`Error: ${userMessage}`, 'error');
-		// Only show console error for text output (CLI)
-		if (outputFormat === 'text' && !isSilentMode()) {
-			console.error(chalk.red(userMessage));
-		}
-		if (CONFIG.debug && outputFormat === 'text' && !isSilentMode()) {
-			log('debug', 'Full error:', error);
-		}
-		throw new Error(userMessage);
+		report(userMessage, 'error');
+		throw error;
+	} finally {
+		if (loadingIndicator) {
+			const success = !!finalResponse;
+			const message = success
+				? 'Claude stream finished'
+				: 'Claude stream ended';
+			stopLoadingIndicator(loadingIndicator, message, success);
+		}
 	}
 }
@@ -528,18 +541,27 @@ async function generateSubtasks(
 	additionalContext = '',
 	{ reportProgress, mcpLog, session } = {}
 ) {
+	log('info', `Generating ${numSubtasks} subtasks for Task ${task.id}...`);
+	const report = (message, level = 'info') => {
+		if (mcpLog && typeof mcpLog[level] === 'function') {
+			mcpLog[level](message);
+		} else if (!isSilentMode()) {
+			log(level, message);
+		}
+	};
+	let loadingIndicator;
+	if (!isSilentMode() && !mcpLog) {
+		loadingIndicator = startLoadingIndicator(
+			'Claude is generating subtasks...'
+		);
+	}
+	const model = getMainModelId(null);
+	const maxTokens = getMainMaxTokens(null);
+	const temperature = getMainTemperature(null);
 	try {
-		log(
-			'info',
-			`Generating ${numSubtasks} subtasks for task ${task.id}: ${task.title}`
-		);
-		const loadingIndicator = startLoadingIndicator(
-			`Generating subtasks for task ${task.id}...`
-		);
-		let streamingInterval = null;
-		let responseText = '';
 		const systemPrompt = `You are an AI assistant helping with task breakdown for software development.
 You need to break down a high-level task into ${numSubtasks} specific subtasks that can be implemented one by one.
@@ -585,9 +607,23 @@ Return exactly ${numSubtasks} subtasks with the following JSON structure:
 Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use an empty array if there are no dependencies.`;
-		try {
-			// Update loading indicator to show streaming progress
-			// Only create interval if not silent and stdout is a TTY
+		const stream = await anthropic.messages.create({
+			model: model,
+			max_tokens: maxTokens,
+			temperature: temperature,
+			system: systemPrompt,
+			messages: [
+				{
+					role: 'user',
+					content: userPrompt
+				}
+			],
+			stream: true
+		});
+
+		let responseText = '';
+		let streamingInterval = null;
 		if (!isSilentMode() && process.stdout.isTTY) {
 			let dotCount = 0;
 			const readline = await import('readline');
@@ -600,42 +636,22 @@ Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use
 			}, 500);
 		}
-		// TODO: MOVE THIS TO THE STREAM REQUEST FUNCTION (DRY)
-		// Use streaming API call
-		const stream = await anthropic.messages.create({
-			model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
-			max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
-			temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
-			system: systemPrompt,
-			messages: [
-				{
-					role: 'user',
-					content: userPrompt
-				}
-			],
-			stream: true
-		});
-		// Process the stream
 		for await (const chunk of stream) {
 			if (chunk.type === 'content_block_delta' && chunk.delta.text) {
 				responseText += chunk.delta.text;
 			}
 			if (reportProgress) {
 				await reportProgress({
-					progress: (responseText.length / CONFIG.maxTokens) * 100
+					progress: (responseText.length / maxTokens) * 100
 				});
 			}
 			if (mcpLog) {
-				mcpLog.info(
-					`Progress: ${(responseText.length / CONFIG.maxTokens) * 100}%`
-				);
+				mcpLog.info(`Progress: ${(responseText.length / maxTokens) * 100}%`);
 			}
 		}
 		if (streamingInterval) clearInterval(streamingInterval);
-		stopLoadingIndicator(loadingIndicator);
+		if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
 
 		log('info', `Completed generating subtasks for task ${task.id}`);
@@ -646,11 +662,7 @@ Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use
 			task.id
 		);
 	} catch (error) {
-		if (streamingInterval) clearInterval(streamingInterval);
-		stopLoadingIndicator(loadingIndicator);
-		throw error;
-		}
-	} catch (error) {
+		if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
 		log('error', `Error generating subtasks: ${error.message}`);
 		throw error;
 	}
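
Both refactored functions above funnel their logging through the same MCP-aware report helper. Pulled out on its own for clarity (a sketch of the shared pattern, not a new export in this commit):

// Shared pattern: route log lines to the MCP logger when one is attached,
// otherwise to the CLI logger, and stay quiet in silent mode.
const report = (message, level = 'info') => {
	if (mcpLog && typeof mcpLog[level] === 'function') {
		mcpLog[level](message);
	} else if (!isSilentMode()) {
		log(level, message);
	}
};

The typeof guard matters: an MCP logger that lacks a given level would otherwise throw instead of logging.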

commands.js

@@ -13,7 +13,7 @@ import inquirer from 'inquirer';
 import ora from 'ora';
 import Table from 'cli-table3';
-import { CONFIG, log, readJSON, writeJSON } from './utils.js';
+import { log, readJSON, writeJSON } from './utils.js';
 import {
 	parsePRD,
 	updateTasks,
@@ -45,16 +45,16 @@ import {
 	getMainModelId,
 	getResearchModelId,
 	getFallbackModelId,
-	setMainModel,
-	setResearchModel,
-	setFallbackModel,
 	getAvailableModels,
 	VALID_PROVIDERS,
 	getMainProvider,
 	getResearchProvider,
 	getFallbackProvider,
-	hasApiKeyForProvider,
-	getMcpApiKeyStatus
+	isApiKeySet,
+	getMcpApiKeyStatus,
+	getDebugFlag,
+	getConfig,
+	writeConfig
 } from './config-manager.js';
 import {
@@ -399,7 +399,8 @@ function registerCommands(programInstance) {
 				);
 			}
-			if (CONFIG.debug) {
+			// Use getDebugFlag getter instead of CONFIG.debug
+			if (getDebugFlag(null)) {
 				console.error(error);
 			}
@@ -554,7 +555,8 @@ function registerCommands(programInstance) {
 				);
 			}
-			if (CONFIG.debug) {
+			// Use getDebugFlag getter instead of CONFIG.debug
+			if (getDebugFlag(null)) {
 				console.error(error);
 			}
@@ -640,8 +642,8 @@ function registerCommands(programInstance) {
 		.option('-a, --all', 'Expand all tasks')
 		.option(
 			'-n, --num <number>',
-			'Number of subtasks to generate',
-			CONFIG.defaultSubtasks.toString()
+			'Number of subtasks to generate (default from config)',
+			'5' // Set a simple string default here
 		)
 		.option(
 			'--research',
@@ -657,7 +659,11 @@ function registerCommands(programInstance) {
 		)
 		.action(async (options) => {
 			const idArg = options.id;
-			const numSubtasks = options.num || CONFIG.defaultSubtasks;
+			// Get the actual default if the user didn't provide --num
+			const numSubtasks =
+				options.num === '5'
+					? getDefaultSubtasks(null)
+					: parseInt(options.num, 10);
 			const useResearch = options.research || false;
 			const additionalContext = options.prompt || '';
 			const forceFlag = options.force || false;
@@ -917,7 +923,7 @@ function registerCommands(programInstance) {
 				console.log(chalk.gray('Next: Complete this task or add more tasks'));
 			} catch (error) {
 				console.error(chalk.red(`Error adding task: ${error.message}`));
-				if (error.stack && CONFIG.debug) {
+				if (error.stack && getDebugFlag(null)) {
 					console.error(error.stack);
 				}
 				process.exit(1);
@@ -1583,13 +1589,13 @@ function registerCommands(programInstance) {
 		)
 		.option('--setup', 'Run interactive setup to configure models')
 		.action(async (options) => {
-			let modelSetAction = false; // Track if any set action was performed
+			let configModified = false; // Track if config needs saving
 			const availableModels = getAvailableModels(); // Get available models once
+			const currentConfig = getConfig(); // Load current config once
 
 			// Helper to find provider for a given model ID
-			const findProvider = (modelId) => {
-				const modelInfo = availableModels.find((m) => m.id === modelId);
-				return modelInfo?.provider;
+			const findModelData = (modelId) => {
+				return availableModels.find((m) => m.id === modelId);
 			};
 
 			try {
@@ -1601,27 +1607,27 @@ function registerCommands(programInstance) {
 					);
 					process.exit(1);
 				}
-				const provider = findProvider(modelId);
-				if (!provider) {
+				const modelData = findModelData(modelId);
+				if (!modelData || !modelData.provider) {
 					console.error(
 						chalk.red(
-							`Error: Model ID "${modelId}" not found in available models.`
+							`Error: Model ID "${modelId}" not found or invalid in available models.`
 						)
 					);
 					process.exit(1);
 				}
-				if (setMainModel(provider, modelId)) {
-					// Call specific setter
-					console.log(
-						chalk.green(
-							`Main model set to: ${modelId} (Provider: ${provider})`
-						)
-					);
-					modelSetAction = true;
-				} else {
-					console.error(chalk.red(`Failed to set main model.`));
-					process.exit(1);
-				}
+				// Update the loaded config object
+				currentConfig.models.main = {
+					...currentConfig.models.main, // Keep existing params like maxTokens
+					provider: modelData.provider,
+					modelId: modelId
+				};
+				console.log(
+					chalk.blue(
+						`Preparing to set main model to: ${modelId} (Provider: ${modelData.provider})`
+					)
+				);
+				configModified = true;
 			}
 
 			if (options.setResearch) {
@@ -1632,27 +1638,27 @@ function registerCommands(programInstance) {
 					);
 					process.exit(1);
 				}
-				const provider = findProvider(modelId);
-				if (!provider) {
+				const modelData = findModelData(modelId);
+				if (!modelData || !modelData.provider) {
 					console.error(
 						chalk.red(
-							`Error: Model ID "${modelId}" not found in available models.`
+							`Error: Model ID "${modelId}" not found or invalid in available models.`
 						)
 					);
 					process.exit(1);
 				}
-				if (setResearchModel(provider, modelId)) {
-					// Call specific setter
-					console.log(
-						chalk.green(
-							`Research model set to: ${modelId} (Provider: ${provider})`
-						)
-					);
-					modelSetAction = true;
-				} else {
-					console.error(chalk.red(`Failed to set research model.`));
-					process.exit(1);
-				}
+				// Update the loaded config object
+				currentConfig.models.research = {
+					...currentConfig.models.research, // Keep existing params like maxTokens
+					provider: modelData.provider,
+					modelId: modelId
+				};
+				console.log(
+					chalk.blue(
+						`Preparing to set research model to: ${modelId} (Provider: ${modelData.provider})`
+					)
+				);
+				configModified = true;
 			}
 
 			if (options.setFallback) {
@@ -1663,30 +1669,49 @@ function registerCommands(programInstance) {
 					);
 					process.exit(1);
 				}
-				const provider = findProvider(modelId);
-				if (!provider) {
+				const modelData = findModelData(modelId);
+				if (!modelData || !modelData.provider) {
 					console.error(
 						chalk.red(
-							`Error: Model ID "${modelId}" not found in available models.`
+							`Error: Model ID "${modelId}" not found or invalid in available models.`
 						)
 					);
 					process.exit(1);
 				}
-				if (setFallbackModel(provider, modelId)) {
-					// Call specific setter
-					console.log(
-						chalk.green(
-							`Fallback model set to: ${modelId} (Provider: ${provider})`
-						)
-					);
-					modelSetAction = true;
-				} else {
-					console.error(chalk.red(`Failed to set fallback model.`));
-					process.exit(1);
-				}
+				// Update the loaded config object
+				currentConfig.models.fallback = {
+					...currentConfig.models.fallback, // Keep existing params like maxTokens
+					provider: modelData.provider,
+					modelId: modelId
+				};
+				console.log(
+					chalk.blue(
+						`Preparing to set fallback model to: ${modelId} (Provider: ${modelData.provider})`
+					)
+				);
+				configModified = true;
 			}
 
-			// Handle interactive setup first
+			// If any config was modified, write it back to the file
+			if (configModified) {
+				if (writeConfig(currentConfig)) {
+					console.log(
+						chalk.green(
+							'Configuration successfully updated in .taskmasterconfig'
+						)
+					);
+				} else {
+					console.error(
+						chalk.red(
+							'Error writing updated configuration to .taskmasterconfig'
+						)
+					);
+					process.exit(1);
+				}
+				return; // Exit after successful set operation
+			}
+
+			// Handle interactive setup first (Keep existing setup logic)
 			if (options.setup) {
 				console.log(chalk.cyan.bold('\nInteractive Model Setup:'));
@@ -1817,8 +1842,8 @@ function registerCommands(programInstance) {
 				return; // Exit after setup
 			}
 
-			// If no set flags were used and not in setup mode, list the models
-			if (!modelSetAction && !options.setup) {
+			// If no set flags were used and not in setup mode, list the models (Keep existing list logic)
+			if (!configModified && !options.setup) {
 				// Fetch current settings
 				const mainProvider = getMainProvider();
 				const mainModelId = getMainModelId();
@@ -1828,12 +1853,12 @@ function registerCommands(programInstance) {
 				const fallbackModelId = getFallbackModelId(); // May be undefined
 
 				// Check API keys for both CLI (.env) and MCP (mcp.json)
-				const mainCliKeyOk = hasApiKeyForProvider(mainProvider);
+				const mainCliKeyOk = isApiKeySet(mainProvider); // <-- Use correct function name
 				const mainMcpKeyOk = getMcpApiKeyStatus(mainProvider);
-				const researchCliKeyOk = hasApiKeyForProvider(researchProvider);
+				const researchCliKeyOk = isApiKeySet(researchProvider); // <-- Use correct function name
 				const researchMcpKeyOk = getMcpApiKeyStatus(researchProvider);
 				const fallbackCliKeyOk = fallbackProvider
-					? hasApiKeyForProvider(fallbackProvider)
+					? isApiKeySet(fallbackProvider) // <-- Use correct function name
 					: true; // No key needed if no fallback is set
 				const fallbackMcpKeyOk = fallbackProvider
 					? getMcpApiKeyStatus(fallbackProvider)
@@ -2080,7 +2105,7 @@ function registerCommands(programInstance) {
 				}
 			} catch (error) {
 				log(`Error processing models command: ${error.message}`, 'error');
-				if (error.stack && CONFIG.debug) {
+				if (error.stack && getDebugFlag(null)) {
 					log(error.stack, 'debug');
 				}
 				process.exit(1);
@@ -2100,7 +2125,7 @@ function setupCLI() {
 		.name('dev')
 		.description('AI-driven development task management')
 		.version(() => {
-			// Read version directly from package.json
+			// Read version directly from package.json ONLY
 			try {
 				const packageJsonPath = path.join(process.cwd(), 'package.json');
 				if (fs.existsSync(packageJsonPath)) {
@@ -2110,9 +2135,13 @@ function setupCLI() {
 					return packageJson.version;
 				}
 			} catch (error) {
-				// Silently fall back to default version
+				// Silently fall back to 'unknown'
+				log(
+					'warn',
+					'Could not read package.json for version info in .version()'
+				);
 			}
-			return CONFIG.projectVersion; // Default fallback
+			return 'unknown'; // Default fallback if package.json fails
 		})
 		.helpOption('-h, --help', 'Display help')
 		.addHelpCommand(false) // Disable default help command
@@ -2141,16 +2170,21 @@
  * @returns {Promise<{currentVersion: string, latestVersion: string, needsUpdate: boolean}>}
  */
 async function checkForUpdate() {
-	// Get current version from package.json
-	let currentVersion = CONFIG.projectVersion;
+	// Get current version from package.json ONLY
+	let currentVersion = 'unknown'; // Initialize with a default
 	try {
-		// Try to get the version from the installed package
-		const packageJsonPath = path.join(
+		// Try to get the version from the installed package (if applicable) or current dir
+		let packageJsonPath = path.join(
 			process.cwd(),
 			'node_modules',
 			'task-master-ai',
 			'package.json'
 		);
+		// Fallback to current directory package.json if not found in node_modules
+		if (!fs.existsSync(packageJsonPath)) {
+			packageJsonPath = path.join(process.cwd(), 'package.json');
+		}
 		if (fs.existsSync(packageJsonPath)) {
 			const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
 			currentVersion = packageJson.version;
@@ -2303,7 +2337,7 @@ async function runCLI(argv = process.argv) {
 	} catch (error) {
 		console.error(chalk.red(`Error: ${error.message}`));
-		if (CONFIG.debug) {
+		if (getDebugFlag(null)) {
 			console.error(error);
 		}
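
The models command now follows a plain read-modify-write cycle against .taskmasterconfig instead of calling per-role setters. Condensed to its essence, using getConfig/writeConfig from the diff above (error handling trimmed, values illustrative):

import { getConfig, writeConfig } from './config-manager.js';

const config = getConfig(); // load (and cache) the current config
config.models.main = {
	...config.models.main, // keep existing params like maxTokens
	provider: 'anthropic',
	modelId: 'claude-3-7-sonnet-20250219'
};
if (!writeConfig(config)) {
	throw new Error('Error writing updated configuration to .taskmasterconfig');
}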

config-manager.js

@@ -2,6 +2,14 @@ import fs from 'fs';
 import path from 'path';
 import chalk from 'chalk';
 import { fileURLToPath } from 'url';
+import { ZodError } from 'zod';
+import {
+	log,
+	readJSON,
+	writeJSON,
+	resolveEnvVariable,
+	findProjectRoot
+} from './utils.js';
 
 // Calculate __dirname in ESM
 const __filename = fileURLToPath(import.meta.url);
@@ -28,64 +36,50 @@ try {
 const CONFIG_FILE_NAME = '.taskmasterconfig';
 
-// Default configuration
-const DEFAULT_MAIN_PROVIDER = 'anthropic';
-const DEFAULT_MAIN_MODEL_ID = 'claude-3.7-sonnet-20250219';
-const DEFAULT_RESEARCH_PROVIDER = 'perplexity';
-const DEFAULT_RESEARCH_MODEL_ID = 'sonar-pro';
-
-// Define ONE list of all supported providers
-const VALID_PROVIDERS = [
-	'anthropic',
-	'openai',
-	'google',
-	'perplexity',
-	'ollama',
-	'openrouter',
-	'grok'
-];
-
-let projectRoot = null;
-
-function findProjectRoot() {
-	// Keep this function as is for CLI context
-	if (projectRoot) return projectRoot;
-	let currentDir = process.cwd();
-	while (currentDir !== path.parse(currentDir).root) {
-		if (fs.existsSync(path.join(currentDir, 'package.json'))) {
-			projectRoot = currentDir;
-			return projectRoot;
-		}
-		currentDir = path.dirname(currentDir);
-	}
-	// Check root directory as a last resort
-	if (fs.existsSync(path.join(currentDir, 'package.json'))) {
-		projectRoot = currentDir;
-		return projectRoot;
-	}
-	// If still not found, maybe look for other markers or return null
-	// For now, returning null if package.json isn't found up to the root
-	projectRoot = null;
-	return null;
-}
-
-function readConfig(explicitRoot = null) {
-	// Determine the root path to use
-	const rootToUse = explicitRoot || findProjectRoot();
-	const defaults = {
-		models: {
-			main: { provider: DEFAULT_MAIN_PROVIDER, modelId: DEFAULT_MAIN_MODEL_ID },
-			research: {
-				provider: DEFAULT_RESEARCH_PROVIDER,
-				modelId: DEFAULT_RESEARCH_MODEL_ID
-			}
-		}
-	};
+// Define valid providers dynamically from the loaded MODEL_MAP
+const VALID_PROVIDERS = Object.keys(MODEL_MAP);
+
+// Default configuration values (used if .taskmasterconfig is missing or incomplete)
+const DEFAULTS = {
+	models: {
+		main: {
+			provider: 'anthropic',
+			modelId: 'claude-3-7-sonnet-20250219',
+			maxTokens: 64000,
+			temperature: 0.2
+		},
+		research: {
+			provider: 'perplexity',
+			modelId: 'sonar-pro',
+			maxTokens: 8700,
+			temperature: 0.1
+		},
+		fallback: {
+			// No default fallback provider/model initially
+			provider: 'anthropic',
+			modelId: 'claude-3-5-sonnet',
+			maxTokens: 64000, // Default parameters if fallback IS configured
+			temperature: 0.2
+		}
+	},
+	global: {
+		logLevel: 'info',
+		debug: false,
+		defaultSubtasks: 5,
+		defaultPriority: 'medium',
+		projectName: 'Task Master',
+		ollamaBaseUrl: 'http://localhost:11434/api'
+	}
+};
+
+// --- Internal Config Loading ---
+let loadedConfig = null; // Cache for loaded config
+
+function _loadAndValidateConfig(explicitRoot = null) {
+	// Determine the root path to use
+	const rootToUse = explicitRoot || findProjectRoot();
+	const defaults = DEFAULTS; // Use the defined defaults
 
 	if (!rootToUse) {
 		console.warn(
 			chalk.yellow(
@@ -101,75 +95,60 @@ function readConfig(explicitRoot = null) {
 		const rawData = fs.readFileSync(configPath, 'utf-8');
 		const parsedConfig = JSON.parse(rawData);
 
-		// Deep merge defaults to ensure structure and handle partial configs
+		// Deep merge with defaults
 		const config = {
 			models: {
-				main: {
-					provider:
-						parsedConfig?.models?.main?.provider ??
-						defaults.models.main.provider,
-					modelId:
-						parsedConfig?.models?.main?.modelId ??
-						defaults.models.main.modelId
-				},
+				main: { ...defaults.models.main, ...parsedConfig?.models?.main },
 				research: {
-					provider:
-						parsedConfig?.models?.research?.provider ??
-						defaults.models.research.provider,
-					modelId:
-						parsedConfig?.models?.research?.modelId ??
-						defaults.models.research.modelId
+					...defaults.models.research,
+					...parsedConfig?.models?.research
 				},
-				// Add merge logic for the fallback model
-				fallback: {
-					provider: parsedConfig?.models?.fallback?.provider,
-					modelId: parsedConfig?.models?.fallback?.modelId
-				}
-			}
+				// Fallback needs careful merging - only merge if provider/model exist
+				fallback:
+					parsedConfig?.models?.fallback?.provider &&
+					parsedConfig?.models?.fallback?.modelId
+						? { ...defaults.models.fallback, ...parsedConfig.models.fallback }
+						: { ...defaults.models.fallback } // Use default params even if provider/model missing
+			},
+			global: { ...defaults.global, ...parsedConfig?.global }
 		};
 
-		// Validate loaded providers (main, research, and fallback if it exists)
+		// --- Validation ---
+		// Validate main provider/model
 		if (!validateProvider(config.models.main.provider)) {
 			console.warn(
 				chalk.yellow(
 					`Warning: Invalid main provider "${config.models.main.provider}" in ${CONFIG_FILE_NAME}. Falling back to default.`
 				)
 			);
-			config.models.main = {
-				provider: defaults.models.main.provider,
-				modelId: defaults.models.main.modelId
-			};
+			config.models.main = { ...defaults.models.main };
 		}
-		// Optional: Add warning for model combination if desired, but don't block
-		// else if (!validateProviderModelCombination(config.models.main.provider, config.models.main.modelId)) { ... }
+		// Optional: Add warning for model combination if desired
 
+		// Validate research provider/model
 		if (!validateProvider(config.models.research.provider)) {
 			console.warn(
 				chalk.yellow(
 					`Warning: Invalid research provider "${config.models.research.provider}" in ${CONFIG_FILE_NAME}. Falling back to default.`
 				)
 			);
-			config.models.research = {
-				provider: defaults.models.research.provider,
-				modelId: defaults.models.research.modelId
-			};
+			config.models.research = { ...defaults.models.research };
 		}
-		// Optional: Add warning for model combination if desired, but don't block
-		// else if (!validateProviderModelCombination(config.models.research.provider, config.models.research.modelId)) { ... }
+		// Optional: Add warning for model combination if desired
 
-		// Add validation for fallback provider if it exists
+		// Validate fallback provider if it exists
 		if (
-			config.models.fallback &&
-			config.models.fallback.provider &&
+			config.models.fallback?.provider &&
 			!validateProvider(config.models.fallback.provider)
 		) {
 			console.warn(
 				chalk.yellow(
-					`Warning: Invalid fallback provider "${config.models.fallback.provider}" in ${CONFIG_FILE_NAME}. Fallback model will be ignored.`
+					`Warning: Invalid fallback provider "${config.models.fallback.provider}" in ${CONFIG_FILE_NAME}. Fallback model configuration will be ignored.`
 				)
 			);
-			// Unlike main/research, we don't set a default fallback, just ignore it
-			delete config.models.fallback;
+			// Clear invalid fallback provider/model, but keep default params if needed elsewhere
+			config.models.fallback.provider = undefined;
+			config.models.fallback.modelId = undefined;
 		}
 
 		return config;
@@ -182,10 +161,28 @@ function readConfig(explicitRoot = null) {
 			return defaults;
 		}
 	} else {
+		// Config file doesn't exist, use defaults
 		return defaults;
 	}
 }
 
+/**
+ * Gets the current configuration, loading it if necessary.
+ * @param {string|null} explicitRoot - Optional explicit path to the project root.
+ * @param {boolean} forceReload - Force reloading the config file.
+ * @returns {object} The loaded configuration object.
+ */
+function getConfig(explicitRoot = null, forceReload = false) {
+	if (!loadedConfig || forceReload) {
+		loadedConfig = _loadAndValidateConfig(explicitRoot);
+	}
+	// If an explicitRoot was provided for a one-off check, don't cache it permanently
+	if (explicitRoot && !forceReload) {
+		return _loadAndValidateConfig(explicitRoot);
+	}
+	return loadedConfig;
+}
+
 /**
  * Validates if a provider name is in the list of supported providers.
  * @param {string} providerName The name of the provider.
@@ -215,402 +212,134 @@ function validateProviderModelCombination(providerName, modelId) {
 	);
 }
 
-/**
- * Gets the currently configured main AI provider.
- * @param {string|null} explicitRoot - Optional explicit path to the project root.
- * @returns {string} The name of the main provider.
- */
+// --- Role-Specific Getters ---
+
+function getModelConfigForRole(role, explicitRoot = null) {
+	const config = getConfig(explicitRoot);
+	const roleConfig = config?.models?.[role];
+	if (!roleConfig) {
+		log('warn', `No model configuration found for role: ${role}`);
+		return DEFAULTS.models[role] || {}; // Fallback to default for the role
+	}
+	return roleConfig;
+}
+
 function getMainProvider(explicitRoot = null) {
-	const config = readConfig(explicitRoot);
-	return config.models.main.provider;
+	return getModelConfigForRole('main', explicitRoot).provider;
 }
 
-/**
- * Gets the currently configured main AI model ID.
- * @param {string|null} explicitRoot - Optional explicit path to the project root.
- * @returns {string} The ID of the main model.
- */
 function getMainModelId(explicitRoot = null) {
-	const config = readConfig(explicitRoot);
-	return config.models.main.modelId;
+	return getModelConfigForRole('main', explicitRoot).modelId;
+}
+
+function getMainMaxTokens(explicitRoot = null) {
+	return getModelConfigForRole('main', explicitRoot).maxTokens;
+}
+
+function getMainTemperature(explicitRoot = null) {
+	return getModelConfigForRole('main', explicitRoot).temperature;
 }
 
-/**
- * Gets the currently configured research AI provider.
- * @param {string|null} explicitRoot - Optional explicit path to the project root.
- * @returns {string} The name of the research provider.
- */
 function getResearchProvider(explicitRoot = null) {
-	const config = readConfig(explicitRoot);
-	return config.models.research.provider;
+	return getModelConfigForRole('research', explicitRoot).provider;
 }
 
-/**
- * Gets the currently configured research AI model ID.
- * @param {string|null} explicitRoot - Optional explicit path to the project root.
- * @returns {string} The ID of the research model.
- */
 function getResearchModelId(explicitRoot = null) {
-	const config = readConfig(explicitRoot);
-	return config.models.research.modelId;
+	return getModelConfigForRole('research', explicitRoot).modelId;
+}
+
+function getResearchMaxTokens(explicitRoot = null) {
+	return getModelConfigForRole('research', explicitRoot).maxTokens;
+}
+
+function getResearchTemperature(explicitRoot = null) {
+	return getModelConfigForRole('research', explicitRoot).temperature;
 }
 
-/**
- * Gets the currently configured fallback AI provider.
- * @param {string|null} explicitRoot - Optional explicit path to the project root.
- * @returns {string|undefined} The name of the fallback provider, or undefined if not set.
- */
 function getFallbackProvider(explicitRoot = null) {
-	const config = readConfig(explicitRoot);
-	return config.models?.fallback?.provider;
+	// Specifically check if provider is set, as fallback is optional
+	return getModelConfigForRole('fallback', explicitRoot).provider || undefined;
 }
 
-/**
- * Gets the currently configured fallback AI model ID.
- * @param {string|null} explicitRoot - Optional explicit path to the project root.
- * @returns {string|undefined} The ID of the fallback model, or undefined if not set.
- */
 function getFallbackModelId(explicitRoot = null) {
-	const config = readConfig(explicitRoot);
-	return config.models?.fallback?.modelId;
+	// Specifically check if modelId is set
+	return getModelConfigForRole('fallback', explicitRoot).modelId || undefined;
+}
+
+function getFallbackMaxTokens(explicitRoot = null) {
+	// Return fallback tokens even if provider/model isn't set, in case it's needed generically
+	return getModelConfigForRole('fallback', explicitRoot).maxTokens;
+}
+
+function getFallbackTemperature(explicitRoot = null) {
+	// Return fallback temp even if provider/model isn't set
+	return getModelConfigForRole('fallback', explicitRoot).temperature;
+}
+
+// --- Global Settings Getters ---
+
+function getGlobalConfig(explicitRoot = null) {
+	const config = getConfig(explicitRoot);
+	return config?.global || DEFAULTS.global;
+}
+
+function getLogLevel(explicitRoot = null) {
+	return getGlobalConfig(explicitRoot).logLevel;
+}
+
+function getDebugFlag(explicitRoot = null) {
+	// Ensure boolean type
+	return getGlobalConfig(explicitRoot).debug === true;
+}
+
+function getDefaultSubtasks(explicitRoot = null) {
+	// Ensure integer type
+	return parseInt(getGlobalConfig(explicitRoot).defaultSubtasks, 10);
+}
+
+function getDefaultPriority(explicitRoot = null) {
+	return getGlobalConfig(explicitRoot).defaultPriority;
+}
+
+function getProjectName(explicitRoot = null) {
+	return getGlobalConfig(explicitRoot).projectName;
+}
+
+function getOllamaBaseUrl(explicitRoot = null) {
+	return getGlobalConfig(explicitRoot).ollamaBaseUrl;
 }
 
+/**
+ * Checks if the API key for a given provider is set in the environment.
+ * Checks process.env first, then session.env if session is provided.
+ * @param {string} providerName - The name of the provider (e.g., 'openai', 'anthropic').
+ * @param {object|null} [session=null] - The MCP session object (optional).
+ * @returns {boolean} True if the API key is set, false otherwise.
+ */
+function isApiKeySet(providerName, session = null) {
+	// Define the expected environment variable name for each provider
+	const keyMap = {
+		openai: 'OPENAI_API_KEY',
+		anthropic: 'ANTHROPIC_API_KEY',
+		google: 'GOOGLE_API_KEY',
+		perplexity: 'PERPLEXITY_API_KEY',
+		grok: 'GROK_API_KEY', // Assuming GROK_API_KEY based on env.example
+		mistral: 'MISTRAL_API_KEY',
+		azure: 'AZURE_OPENAI_API_KEY', // Azure needs endpoint too, but key presence is a start
+		openrouter: 'OPENROUTER_API_KEY',
+		xai: 'XAI_API_KEY'
+		// Add other providers as needed
+	};
+
+	const providerKey = providerName?.toLowerCase();
+	if (!providerKey || !keyMap[providerKey]) {
+		log('warn', `Unknown provider name: ${providerName} in isApiKeySet check.`);
+		return false;
+	}
+
+	const envVarName = keyMap[providerKey];
+	// Use resolveEnvVariable to check both process.env and session.env
+	return !!resolveEnvVariable(envVarName, session);
+}
+
-/**
- * Sets the main AI model (provider and modelId) in the configuration file.
- * @param {string} providerName The name of the provider to set.
- * @param {string} modelId The ID of the model to set.
- * @param {string|null} explicitRoot - Optional explicit path to the project root.
- * @returns {boolean} True if successful, false otherwise.
- */
-function setMainModel(providerName, modelId, explicitRoot = null) {
-	// --- 1. Validate Provider First ---
-	if (!validateProvider(providerName)) {
-		console.error(
-			chalk.red(`Error: "${providerName}" is not a valid provider.`)
-		);
-		console.log(
-			chalk.yellow(`Available providers: ${VALID_PROVIDERS.join(', ')}`)
-		);
-		return false;
-	}
-	// --- 2. Validate Role Second ---
-	const allModels = getAvailableModels(); // Get all models to check roles
-	const modelData = allModels.find(
-		(m) => m.id === modelId && m.provider === providerName
-	);
-	if (
-		!modelData ||
-		!modelData.allowed_roles ||
-		!modelData.allowed_roles.includes('main')
-	) {
-		console.error(
-			chalk.red(`Error: Model "${modelId}" is not allowed for the 'main' role.`)
-		);
-		// Try to suggest valid models for the role
-		const allowedMainModels = allModels
-			.filter((m) => m.allowed_roles?.includes('main'))
-			.map((m) => ` - ${m.provider} / ${m.id}`)
-			.join('\n');
-		if (allowedMainModels) {
-			console.log(
-				chalk.yellow('\nAllowed models for main role:\n' + allowedMainModels)
-			);
-		}
-		return false;
-	}
-	// --- 3. Validate Model Combination (Optional Warning) ---
-	if (!validateProviderModelCombination(providerName, modelId)) {
-		console.warn(
-			chalk.yellow(
-				`Warning: Model "${modelId}" is not in the known list for provider "${providerName}". Ensure it is valid.`
-			)
-		);
-	}
-	// --- Proceed with setting ---
-	const config = readConfig(explicitRoot);
-	config.models.main = { provider: providerName, modelId: modelId };
-	// Pass explicitRoot down
-	if (writeConfig(config, explicitRoot)) {
-		console.log(
-			chalk.green(`Main AI model set to: ${providerName} / ${modelId}`)
-		);
-		return true;
-	} else {
-		return false;
-	}
-}
-
-/**
- * Sets the research AI model (provider and modelId) in the configuration file.
- * @param {string} providerName The name of the provider to set.
- * @param {string} modelId The ID of the model to set.
- * @param {string|null} explicitRoot - Optional explicit path to the project root.
- * @returns {boolean} True if successful, false otherwise.
- */
-function setResearchModel(providerName, modelId, explicitRoot = null) {
-	// --- 1. Validate Provider First ---
-	if (!validateProvider(providerName)) {
-		console.error(
-			chalk.red(`Error: "${providerName}" is not a valid provider.`)
-		);
-		console.log(
-			chalk.yellow(`Available providers: ${VALID_PROVIDERS.join(', ')}`)
-		);
-		return false;
-	}
-	// --- 2. Validate Role Second ---
-	const allModels = getAvailableModels(); // Get all models to check roles
-	const modelData = allModels.find(
-		(m) => m.id === modelId && m.provider === providerName
-	);
-	if (
-		!modelData ||
-		!modelData.allowed_roles ||
-		!modelData.allowed_roles.includes('research')
-	) {
-		console.error(
-			chalk.red(
-				`Error: Model "${modelId}" is not allowed for the 'research' role.`
-			)
-		);
-		// Try to suggest valid models for the role
-		const allowedResearchModels = allModels
-			.filter((m) => m.allowed_roles?.includes('research'))
-			.map((m) => ` - ${m.provider} / ${m.id}`)
-			.join('\n');
-		if (allowedResearchModels) {
-			console.log(
-				chalk.yellow(
-					'\nAllowed models for research role:\n' + allowedResearchModels
-				)
-			);
-		}
-		return false;
-	}
-	// --- 3. Validate Model Combination (Optional Warning) ---
-	if (!validateProviderModelCombination(providerName, modelId)) {
-		console.warn(
-			chalk.yellow(
-				`Warning: Model "${modelId}" is not in the known list for provider "${providerName}". Ensure it is valid.`
-			)
-		);
-	}
-	// --- 4. Specific Research Warning (Optional) ---
-	if (
-		providerName === 'anthropic' ||
-		(providerName === 'openai' && modelId.includes('3.5'))
-	) {
-		console.warn(
-			chalk.yellow(
-				`Warning: Provider "${providerName}" with model "${modelId}" may not be ideal for research tasks. Perplexity or Grok recommended.`
-			)
-		);
-	}
-	// --- Proceed with setting ---
-	const config = readConfig(explicitRoot);
-	config.models.research = { provider: providerName, modelId: modelId };
-	// Pass explicitRoot down
-	if (writeConfig(config, explicitRoot)) {
-		console.log(
-			chalk.green(`Research AI model set to: ${providerName} / ${modelId}`)
-		);
-		return true;
-	} else {
-		return false;
-	}
-}
-
-/**
- * Sets the fallback AI model (provider and modelId) in the configuration file.
- * @param {string} providerName The name of the provider to set.
- * @param {string} modelId The ID of the model to set.
- * @param {string|null} explicitRoot - Optional explicit path to the project root.
- * @returns {boolean} True if successful, false otherwise.
- */
-function setFallbackModel(providerName, modelId, explicitRoot = null) {
-	// --- 1. Validate Provider First ---
-	if (!validateProvider(providerName)) {
-		console.error(
-			chalk.red(`Error: "${providerName}" is not a valid provider.`)
-		);
-		console.log(
-			chalk.yellow(`Available providers: ${VALID_PROVIDERS.join(', ')}`)
-		);
-		return false;
-	}
-	// --- 2. Validate Role Second ---
const allModels = getAvailableModels(); // Get all models to check roles
const modelData = allModels.find(
(m) => m.id === modelId && m.provider === providerName
);
if (
!modelData ||
!modelData.allowed_roles ||
!modelData.allowed_roles.includes('fallback')
) {
console.error(
chalk.red(
`Error: Model "${modelId}" is not allowed for the 'fallback' role.`
)
);
// Try to suggest valid models for the role
const allowedFallbackModels = allModels
.filter((m) => m.allowed_roles?.includes('fallback'))
.map((m) => ` - ${m.provider} / ${m.id}`)
.join('\n');
if (allowedFallbackModels) {
console.log(
chalk.yellow(
'\nAllowed models for fallback role:\n' + allowedFallbackModels
)
);
}
return false;
}
// --- 3. Validate Model Combination (Optional Warning) ---
if (!validateProviderModelCombination(providerName, modelId)) {
console.warn(
chalk.yellow(
`Warning: Model "${modelId}" is not in the known list for provider "${providerName}". Ensure it is valid.`
)
);
}
// --- Proceed with setting ---
const config = readConfig(explicitRoot);
if (!config.models) {
config.models = {}; // Ensure models object exists
}
// Ensure fallback object exists
if (!config.models.fallback) {
config.models.fallback = {};
}
config.models.fallback = { provider: providerName, modelId: modelId };
return writeConfig(config, explicitRoot);
}
/**
* Gets a list of available models based on the MODEL_MAP.
* @returns {Array<{id: string, name: string, provider: string, swe_score: number|null, cost_per_1m_tokens: {input: number|null, output: number|null}|null, allowed_roles: string[]}>}
*/
function getAvailableModels() {
const available = [];
for (const [provider, models] of Object.entries(MODEL_MAP)) {
if (models.length > 0) {
models.forEach((modelObj) => {
// Basic name generation - can be improved
const modelId = modelObj.id;
const sweScore = modelObj.swe_score;
const cost = modelObj.cost_per_1m_tokens;
const allowedRoles = modelObj.allowed_roles || ['main', 'fallback'];
const nameParts = modelId
.split('-')
.map((p) => p.charAt(0).toUpperCase() + p.slice(1));
// Handle specific known names better if needed
let name = nameParts.join(' ');
if (modelId === 'claude-3.5-sonnet-20240620')
name = 'Claude 3.5 Sonnet';
if (modelId === 'claude-3-7-sonnet-20250219')
name = 'Claude 3.7 Sonnet';
if (modelId === 'gpt-4o') name = 'GPT-4o';
if (modelId === 'gpt-4-turbo') name = 'GPT-4 Turbo';
if (modelId === 'sonar-pro') name = 'Perplexity Sonar Pro';
if (modelId === 'sonar-mini') name = 'Perplexity Sonar Mini';
available.push({
id: modelId,
name: name,
provider: provider,
swe_score: sweScore,
cost_per_1m_tokens: cost,
allowed_roles: allowedRoles
});
});
} else {
// For providers with empty lists (like ollama), maybe add a placeholder or skip
available.push({
id: `[${provider}-any]`,
name: `Any (${provider})`,
provider: provider
});
}
}
return available;
}
/**
* Writes the configuration object to the file.
* @param {Object} config The configuration object to write.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {boolean} True if successful, false otherwise.
*/
function writeConfig(config, explicitRoot = null) {
const rootPath = explicitRoot || findProjectRoot();
if (!rootPath) {
console.error(
chalk.red(
'Error: Could not determine project root. Configuration not saved.'
)
);
return false;
}
// Ensure we don't double-join if explicitRoot already contains the filename
const configPath =
path.basename(rootPath) === CONFIG_FILE_NAME
? rootPath
: path.join(rootPath, CONFIG_FILE_NAME);
try {
fs.writeFileSync(configPath, JSON.stringify(config, null, 2));
return true;
} catch (error) {
console.error(
chalk.red(
`Error writing configuration to ${configPath}: ${error.message}`
)
);
return false;
}
}
/**
* Checks if the required API key environment variable is set for a given provider.
* @param {string} providerName The name of the provider.
* @returns {boolean} True if the API key environment variable exists and is non-empty, false otherwise.
*/
function hasApiKeyForProvider(providerName) {
switch (providerName) {
case 'anthropic':
return !!process.env.ANTHROPIC_API_KEY;
case 'openai':
case 'openrouter': // OpenRouter uses OpenAI-compatible key
return !!process.env.OPENAI_API_KEY;
case 'google':
return !!process.env.GOOGLE_API_KEY;
case 'perplexity':
return !!process.env.PERPLEXITY_API_KEY;
case 'grok':
case 'xai': // Added alias for Grok
return !!process.env.GROK_API_KEY;
case 'ollama':
return true; // Ollama runs locally, no cloud API key needed
default:
return false; // Unknown provider cannot have a key checked
}
}

/**
@@ -685,24 +414,125 @@ function getMcpApiKeyStatus(providerName) {
	}
}
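
Illustrative sketch (not part of this commit): a caller can combine the getters above with isApiKeySet to skip providers whose keys are missing; assumes a resolved session object (or null) is in scope.

// Hypothetical pre-flight check before dispatching an AI call.
const provider = getMainProvider();
if (!isApiKeySet(provider, session)) {
	log('warn', `No API key detected for ${provider}; trying the fallback role.`);
}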
/**
* Gets a list of available models based on the MODEL_MAP.
* @returns {Array<{id: string, name: string, provider: string, swe_score: number|null, cost_per_1m_tokens: {input: number|null, output: number|null}|null, allowed_roles: string[]}>}
*/
function getAvailableModels() {
const available = [];
for (const [provider, models] of Object.entries(MODEL_MAP)) {
if (models.length > 0) {
models.forEach((modelObj) => {
// Basic name generation - can be improved
const modelId = modelObj.id;
const sweScore = modelObj.swe_score;
const cost = modelObj.cost_per_1m_tokens;
const allowedRoles = modelObj.allowed_roles || ['main', 'fallback'];
const nameParts = modelId
.split('-')
.map((p) => p.charAt(0).toUpperCase() + p.slice(1));
// Handle specific known names better if needed
let name = nameParts.join(' ');
if (modelId === 'claude-3.5-sonnet-20240620')
name = 'Claude 3.5 Sonnet';
if (modelId === 'claude-3-7-sonnet-20250219')
name = 'Claude 3.7 Sonnet';
if (modelId === 'gpt-4o') name = 'GPT-4o';
if (modelId === 'gpt-4-turbo') name = 'GPT-4 Turbo';
if (modelId === 'sonar-pro') name = 'Perplexity Sonar Pro';
if (modelId === 'sonar-mini') name = 'Perplexity Sonar Mini';
available.push({
id: modelId,
name: name,
provider: provider,
swe_score: sweScore,
cost_per_1m_tokens: cost,
allowed_roles: allowedRoles
});
});
} else {
// For providers with empty lists (like ollama), maybe add a placeholder or skip
available.push({
id: `[${provider}-any]`,
name: `Any (${provider})`,
provider: provider
});
}
}
return available;
}
/**
* Writes the configuration object to the file.
* @param {Object} config The configuration object to write.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {boolean} True if successful, false otherwise.
*/
function writeConfig(config, explicitRoot = null) {
const rootPath = explicitRoot || findProjectRoot();
if (!rootPath) {
console.error(
chalk.red(
'Error: Could not determine project root. Configuration not saved.'
)
);
return false;
}
const configPath =
path.basename(rootPath) === CONFIG_FILE_NAME
? rootPath
: path.join(rootPath, CONFIG_FILE_NAME);
try {
fs.writeFileSync(configPath, JSON.stringify(config, null, 2));
loadedConfig = config; // Update the cache after successful write
return true;
} catch (error) {
console.error(
chalk.red(
`Error writing configuration to ${configPath}: ${error.message}`
)
);
return false;
}
}
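
Illustrative sketch (not part of this commit): since writeConfig refreshes the in-memory cache on success, a read-modify-write round trip leaves subsequent getters consistent with the file.

// Hypothetical: flip the global debug flag and persist it.
const config = getConfig();
config.global = { ...config.global, debug: true };
if (writeConfig(config)) {
	// getDebugFlag() now reflects the persisted value via the updated cache.
}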
export {
	// Core config access
	getConfig, // Might still be useful for getting the whole object
	writeConfig,
	// Validation
	validateProvider,
	validateProviderModelCombination,
	VALID_PROVIDERS,
	MODEL_MAP,
	getAvailableModels,
	// Role-specific getters
	getMainProvider,
	getMainModelId,
	getMainMaxTokens,
	getMainTemperature,
	getResearchProvider,
	getResearchModelId,
	getResearchMaxTokens,
	getResearchTemperature,
	getFallbackProvider,
	getFallbackModelId,
	getFallbackMaxTokens,
	getFallbackTemperature,
	// Global setting getters
	getLogLevel,
	getDebugFlag,
	getDefaultSubtasks,
	getDefaultPriority,
	getProjectName,
	getOllamaBaseUrl,
	// API Key Checkers (still relevant)
	isApiKeySet,
	getMcpApiKeyStatus
	// Dropped from the old export list: readConfig, setMainModel,
	// setResearchModel, setFallbackModel, hasApiKeyForProvider.
	// findProjectRoot is still not exported here; it now lives in utils.js.
};

View File

@@ -14,7 +14,6 @@ import ora from 'ora';
import inquirer from 'inquirer';
import {
	// CONFIG, <-- Removed
	log,
	readJSON,
	writeJSON,
@@ -86,6 +85,14 @@ try {
	log('warn', 'Research-backed features will not be available');
}
// Import necessary config getters
import {
getDebugFlag,
getDefaultSubtasks,
getDefaultPriority
// Add other getters here as needed later
} from './config-manager.js';
/**
 * Parse a PRD file and generate tasks
 * @param {string} prdPath - Path to the PRD file
@@ -196,7 +203,8 @@ async function parsePRD(
		if (outputFormat === 'text') {
			console.error(chalk.red(`Error: ${error.message}`));
			if (getDebugFlag()) {
				// Use getter
				console.error(error);
			}
@@ -675,7 +683,8 @@ Return only the updated task as a valid JSON object.`
			console.log(' 2. Ensure PERPLEXITY_API_KEY is set for fallback.');
		}
		if (getDebugFlag()) {
			// Use getter
			console.error(error);
		}
@@ -1337,7 +1346,8 @@ Return only the updated task as a valid JSON object.`
			console.log(' 2. Use a valid task ID with the --id parameter');
		}
		if (getDebugFlag()) {
			// Use getter
			console.error(error);
		}
	} else {
@@ -1484,7 +1494,8 @@ function generateTaskFiles(tasksPath, outputDir, options = {}) {
	if (!options?.mcpLog) {
		console.error(chalk.red(`Error generating task files: ${error.message}`));
		if (getDebugFlag()) {
			// Use getter
			console.error(error);
		}
@@ -1584,7 +1595,8 @@ async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {
	if (!options?.mcpLog) {
		console.error(chalk.red(`Error: ${error.message}`));
		if (getDebugFlag()) {
			// Use getter
			console.error(error);
		}
@@ -2477,7 +2489,7 @@ async function expandTask(
	}

	// Determine the number of subtasks to generate
	let subtaskCount = parseInt(numSubtasks, 10) || getDefaultSubtasks(); // Use getter

	// Check if we have a complexity analysis for this task
	let taskAnalysis = null;
@@ -2504,7 +2516,7 @@ async function expandTask(
		// Use recommended number of subtasks if available
		if (
			taskAnalysis.recommendedSubtasks &&
			subtaskCount === getDefaultSubtasks() // Use getter
		) {
			subtaskCount = taskAnalysis.recommendedSubtasks;
			report(`Using recommended number of subtasks: ${subtaskCount}`);
@@ -2672,7 +2684,7 @@ Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use
 */
async function expandAllTasks(
	tasksPath,
	numSubtasks = getDefaultSubtasks(), // Use getter
	useResearch = false,
	additionalContext = '',
	forceFlag = false,
@@ -2698,7 +2710,7 @@ async function expandAllTasks(
	if (typeof numSubtasks === 'string') {
		numSubtasks = parseInt(numSubtasks, 10);
		if (isNaN(numSubtasks)) {
			numSubtasks = getDefaultSubtasks(); // Use getter
		}
	}
@@ -3127,7 +3139,7 @@ async function addTask(
	tasksPath,
	prompt,
	dependencies = [],
	priority = getDefaultPriority(), // Use getter
	{ reportProgress, mcpLog, session } = {},
	outputFormat = 'text',
	customEnv = null,
@@ -4415,7 +4427,8 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
			console.error(
				chalk.red(`Error parsing complexity analysis: ${error.message}`)
			);
			if (getDebugFlag()) {
				// Use getter
				console.debug(
					chalk.gray(`Raw response: ${fullResponse.substring(0, 500)}...`)
				);
@@ -4460,7 +4473,8 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
			);
		}
		if (getDebugFlag()) {
			// Use getter
			console.error(error);
		}
@@ -5382,7 +5396,8 @@ Provide concrete examples, code snippets, or implementation details when relevan
			);
		}
		if (getDebugFlag()) {
			// Use getter
			console.error(error);
		}
	} else {

View File

@@ -1,32 +0,0 @@
async function updateSubtaskById(tasksPath, subtaskId, prompt, useResearch = false) {
let loadingIndicator = null;
try {
log('info', `Updating subtask ${subtaskId} with prompt: "${prompt}"`);
// Validate subtask ID format
if (!subtaskId || typeof subtaskId !== 'string' || !subtaskId.includes('.')) {
throw new Error(`Invalid subtask ID format: ${subtaskId}. Subtask ID must be in format "parentId.subtaskId"`);
}
// Validate prompt
if (!prompt || typeof prompt !== 'string' || prompt.trim() === '') {
throw new Error('Prompt cannot be empty. Please provide context for the subtask update.');
}
// Prepare for fallback handling
let claudeOverloaded = false;
// Validate tasks file exists
if (!fs.existsSync(tasksPath)) {
throw new Error(`Tasks file not found at path: ${tasksPath}`);
}
// Read the tasks file
const data = readJSON(tasksPath);
// ... rest of the function
} catch (error) {
// Handle errors
console.error(`Error updating subtask: ${error.message}`);
throw error;
}
}

View File

@@ -10,7 +10,6 @@ import ora from 'ora';
import Table from 'cli-table3';
import gradient from 'gradient-string';
import {
	// CONFIG, <-- Removed
	log,
	findTaskById,
	readJSON,
@@ -20,6 +19,7 @@ import {
import path from 'path';
import fs from 'fs';
import { findNextTask, analyzeTaskComplexity } from './task-manager.js';
import { getProjectName, getDefaultSubtasks } from './config-manager.js';

// Create a color gradient for the banner
const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);
@@ -44,7 +44,7 @@ function displayBanner() {
	);

	// Read version directly from package.json
	let version = 'unknown'; // Initialize with a default
	try {
		const packageJsonPath = path.join(process.cwd(), 'package.json');
		if (fs.existsSync(packageJsonPath)) {
@@ -53,12 +53,13 @@
		}
	} catch (error) {
		// Silently fall back to default version
		log('warn', 'Could not read package.json for version info.');
	}

	console.log(
		boxen(
			chalk.white(
				`${chalk.bold('Version:')} ${version} ${chalk.bold('Project:')} ${getProjectName(null)}`
			),
			{
				padding: 1,
@@ -1652,6 +1653,45 @@ async function displayComplexityReport(reportPath) {
	);
}
/**
* Generate a prompt for complexity analysis
* @param {Object} tasksData - Tasks data object containing tasks array
* @returns {string} Generated prompt
*/
function generateComplexityAnalysisPrompt(tasksData) {
const defaultSubtasks = getDefaultSubtasks(null); // Use the getter
return `Analyze the complexity of the following tasks and provide recommendations for subtask breakdown:
${tasksData.tasks
.map(
(task) => `
Task ID: ${task.id}
Title: ${task.title}
Description: ${task.description}
Details: ${task.details}
Dependencies: ${JSON.stringify(task.dependencies || [])}
Priority: ${task.priority || 'medium'}
`
)
.join('\n---\n')}
Analyze each task and return a JSON array with the following structure for each task:
[
{
"taskId": number,
"taskTitle": string,
"complexityScore": number (1-10),
"recommendedSubtasks": number (${Math.max(3, defaultSubtasks - 1)}-${Math.min(8, defaultSubtasks + 2)}),
"expansionPrompt": string (a specific prompt for generating good subtasks),
"reasoning": string (brief explanation of your assessment)
},
...
]
IMPORTANT: Make sure to include an analysis for EVERY task listed above, with the correct taskId matching each task's ID.
`;
}
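
Illustrative call (not part of this commit): the prompt builder only needs a tasks array, so a small fixture is enough to exercise the output shape.

// Hypothetical fixture for generateComplexityAnalysisPrompt.
const prompt = generateComplexityAnalysisPrompt({
	tasks: [
		{
			id: 1,
			title: 'Wire up config getters',
			description: 'Replace CONFIG references with config-manager getters',
			details: 'Swap each CONFIG reference for its getter.',
			dependencies: [],
			priority: 'high'
		}
	]
});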
/**
 * Confirm overwriting existing tasks.json file
 * @param {string} tasksPath - Path to the tasks.json file
@@ -1706,5 +1746,6 @@ export {
	displayNextTask,
	displayTaskById,
	displayComplexityReport,
	generateComplexityAnalysisPrompt,
	confirmTaskOverwrite
};

View File

@@ -6,22 +6,61 @@
import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import { ZodError } from 'zod';
// Import specific config getters needed here
import { getLogLevel, getDebugFlag } from './config-manager.js';

// Global silent mode flag
let silentMode = false;

// --- Configuration and constants --- (REMOVED)
// The old hardcoded CONFIG object (model, maxTokens, temperature, debug,
// logLevel, defaultSubtasks, defaultPriority, projectName, and the hardcoded
// projectVersion '1.5.0') is gone; values now come from config-manager getters.

// --- Environment Variable Resolution Utility ---
/**
 * Resolves an environment variable by checking process.env first, then session.env.
 * @param {string} varName - The name of the environment variable.
 * @param {object|null} session - The MCP session object (optional).
 * @returns {string|undefined} The value of the environment variable or undefined if not found.
 */
function resolveEnvVariable(varName, session) {
	// Ensure session and session.env exist before attempting access
	const sessionValue =
		session && session.env ? session.env[varName] : undefined;
	return process.env[varName] ?? sessionValue;
}
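
Illustrative sketch (not part of this commit): process.env takes precedence over session.env, so an MCP-supplied key only applies when the variable is not already set locally.

// Hypothetical: resolution order demonstration.
const session = { env: { PERPLEXITY_API_KEY: 'key-from-mcp-session' } };
// Yields process.env.PERPLEXITY_API_KEY when set, else 'key-from-mcp-session'.
const key = resolveEnvVariable('PERPLEXITY_API_KEY', session);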
// --- Project Root Finding Utility ---
/**
* Finds the project root directory by searching upwards from a given starting point
* for a marker file or directory (e.g., 'package.json', '.git').
* @param {string} [startPath=process.cwd()] - The directory to start searching from.
* @param {string[]} [markers=['package.json', '.git', '.taskmasterconfig']] - Marker files/dirs to look for.
* @returns {string|null} The path to the project root directory, or null if not found.
*/
function findProjectRoot(
startPath = process.cwd(),
markers = ['package.json', '.git', '.taskmasterconfig']
) {
let currentPath = path.resolve(startPath);
while (true) {
for (const marker of markers) {
if (fs.existsSync(path.join(currentPath, marker))) {
return currentPath;
}
}
const parentPath = path.dirname(currentPath);
if (parentPath === currentPath) {
// Reached the filesystem root
return null;
}
currentPath = parentPath;
}
}
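
Illustrative sketch (not part of this commit): the markers parameter is overridable, so a caller can anchor root detection on the Task Master config file alone.

// Hypothetical: locate the project root by .taskmasterconfig only.
const root = findProjectRoot(process.cwd(), ['.taskmasterconfig']);
if (!root) {
	log('warn', 'No .taskmasterconfig found between cwd and the filesystem root.');
}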
// --- Dynamic Configuration Function --- (REMOVED)
/*
function getConfig(session = null) {
// ... implementation removed ...
}
*/
// Set up logging based on log level
const LOG_LEVELS = {
@@ -73,6 +112,9 @@ function log(level, ...args) {
		return;
	}

	// Get log level dynamically from config-manager
	const configLevel = getLogLevel() || 'info'; // Use getter

	// Use text prefixes instead of emojis
	const prefixes = {
		debug: chalk.gray('[DEBUG]'),
@@ -84,7 +126,6 @@ function log(level, ...args) {
	// Ensure level exists, default to info if not
	const currentLevel = LOG_LEVELS.hasOwnProperty(level) ? level : 'info';
	// const configLevel = CONFIG.logLevel || 'info'; <-- Removed (configLevel is now read via getLogLevel above)
	// Check log level configuration
	if (
@@ -106,12 +147,15 @@ function log(level, ...args) {
 * @returns {Object|null} Parsed JSON data or null if error occurs
 */
function readJSON(filepath) {
	// Get debug flag dynamically from config-manager
	const isDebug = getDebugFlag();
	try {
		const rawData = fs.readFileSync(filepath, 'utf8');
		return JSON.parse(rawData);
	} catch (error) {
		log('error', `Error reading JSON file ${filepath}:`, error.message);
		if (isDebug) {
			// Use dynamic debug flag
			// Use log utility for debug output too
			log('error', 'Full error details:', error);
		}
@@ -125,6 +169,8 @@ function readJSON(filepath) {
 * @param {Object} data - Data to write
 */
function writeJSON(filepath, data) {
	// Get debug flag dynamically from config-manager
	const isDebug = getDebugFlag();
	try {
		const dir = path.dirname(filepath);
		if (!fs.existsSync(dir)) {
@@ -133,7 +179,8 @@ function writeJSON(filepath, data) {
		fs.writeFileSync(filepath, JSON.stringify(data, null, 2), 'utf8');
	} catch (error) {
		log('error', `Error writing JSON file ${filepath}:`, error.message);
		if (isDebug) {
			// Use dynamic debug flag
			// Use log utility for debug output too
			log('error', 'Full error details:', error);
		}
@@ -156,6 +203,8 @@ function sanitizePrompt(prompt) {
 * @returns {Object|null} The parsed complexity report or null if not found
 */
function readComplexityReport(customPath = null) {
	// Get debug flag dynamically from config-manager
	const isDebug = getDebugFlag();
	try {
		const reportPath =
			customPath ||
@@ -168,6 +217,11 @@ function readComplexityReport(customPath = null) {
		return JSON.parse(reportData);
	} catch (error) {
		log('warn', `Could not read complexity report: ${error.message}`);
		// Optionally log full error in debug mode
		if (isDebug) {
			// Use dynamic debug flag
			log('error', 'Full error details:', error);
		}
		return null;
	}
}
@@ -399,7 +453,8 @@ function detectCamelCaseFlags(args) {
// Export all utility functions and configuration
export {
	// CONFIG, <-- Already Removed
	// getConfig <-- Removing now
	LOG_LEVELS,
	log,
	readJSON,
@@ -417,5 +472,8 @@ export {
	enableSilentMode,
	disableSilentMode,
	isSilentMode,
	resolveEnvVariable,
	getTaskManager,
	findProjectRoot
	// getConfig <-- Removed
};

View File

@@ -0,0 +1,191 @@
/**
* src/ai-providers/anthropic.js
*
* Implementation for interacting with Anthropic models (e.g., Claude)
* using the Vercel AI SDK.
*/
import { createAnthropic } from '@ai-sdk/anthropic';
import { generateText, streamText, generateObject, streamObject } from 'ai';
import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible
// TODO: Implement standardized functions for generateText, streamText, generateObject
// --- Client Instantiation ---
// Note: API key resolution should ideally happen closer to the call site
// using the config manager/resolver which checks process.env and session.env.
// This is a placeholder for basic functionality.
// Remove the global variable and caching logic
// let anthropicClient;
function getClient(apiKey) {
if (!apiKey) {
// In a real scenario, this would use the config resolver.
// Throwing error here if key isn't passed for simplicity.
// Keep the error check for the passed key
throw new Error('Anthropic API key is required.');
}
// Remove the check for anthropicClient
// if (!anthropicClient) {
// TODO: Explore passing options like default headers if needed
// Create and return a new instance directly
return createAnthropic({
apiKey: apiKey
});
// }
// return anthropicClient;
}
// --- Standardized Service Function Implementations ---
/**
* Generates text using an Anthropic model.
*
* @param {object} params - Parameters for the text generation.
* @param {string} params.apiKey - The Anthropic API key.
* @param {string} params.modelId - The specific Anthropic model ID to use (e.g., 'claude-3-haiku-20240307').
* @param {string} params.systemPrompt - The system prompt.
* @param {string} params.userPrompt - The user prompt.
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
* @returns {Promise<string>} The generated text content.
* @throws {Error} If the API call fails.
*/
export async function generateAnthropicText({
apiKey,
modelId,
systemPrompt,
userPrompt,
maxTokens,
temperature
}) {
log('debug', `Generating Anthropic text with model: ${modelId}`);
try {
const client = getClient(apiKey);
const result = await generateText({
model: client(modelId), // Pass the model ID to the client instance
system: systemPrompt,
prompt: userPrompt,
maxTokens: maxTokens,
temperature: temperature
// TODO: Add other relevant parameters like topP, topK if needed
});
log(
'debug',
`Anthropic generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
);
return result.text;
} catch (error) {
log('error', `Anthropic generateText failed: ${error.message}`);
// Consider more specific error handling or re-throwing a standardized error
throw error;
}
}
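
Illustrative call (not part of this commit), inside an async context; assumes the key was resolved upstream (e.g., via resolveEnvVariable) and a session object may be null.

// Hypothetical one-shot generation through the wrapper.
const text = await generateAnthropicText({
	apiKey: resolveEnvVariable('ANTHROPIC_API_KEY', session),
	modelId: 'claude-3-7-sonnet-20250219',
	systemPrompt: 'You are a terse assistant.',
	userPrompt: 'Summarize the config refactor in one sentence.',
	maxTokens: 256,
	temperature: 0.2
});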
/**
* Streams text using an Anthropic model.
*
* @param {object} params - Parameters for the text streaming.
* @param {string} params.apiKey - The Anthropic API key.
* @param {string} params.modelId - The specific Anthropic model ID.
* @param {string} params.systemPrompt - The system prompt.
* @param {string} params.userPrompt - The user prompt.
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
* @returns {Promise<ReadableStream<string>>} A readable stream of text deltas.
* @throws {Error} If the API call fails to initiate the stream.
*/
export async function streamAnthropicText({
apiKey,
modelId,
systemPrompt,
userPrompt,
maxTokens,
temperature
}) {
log('debug', `Streaming Anthropic text with model: ${modelId}`);
try {
const client = getClient(apiKey);
const stream = await streamText({
model: client(modelId),
system: systemPrompt,
prompt: userPrompt,
maxTokens: maxTokens,
temperature: temperature
// TODO: Add other relevant parameters
});
// We return the stream directly. The consumer will handle reading it.
// We could potentially wrap it or add logging within the stream pipe if needed.
return stream.textStream;
} catch (error) {
log('error', `Anthropic streamText failed: ${error.message}`);
throw error;
}
}
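
Illustrative consumption (not part of this commit): the Vercel AI SDK's textStream is an async iterable, so deltas can be piped to stdout as they arrive; assumes an apiKey variable resolved earlier.

// Hypothetical incremental consumer for the returned stream.
const textStream = await streamAnthropicText({
	apiKey,
	modelId: 'claude-3-7-sonnet-20250219',
	systemPrompt: 'You are a terse assistant.',
	userPrompt: 'Stream a two-line status update.'
});
for await (const delta of textStream) {
	process.stdout.write(delta);
}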
/**
* Generates a structured object using an Anthropic model.
* NOTE: Anthropic's tool/function calling support might have limitations
* compared to OpenAI, especially regarding complex schemas or enforcement.
* The Vercel AI SDK attempts to abstract this.
*
* @param {object} params - Parameters for object generation.
* @param {string} params.apiKey - The Anthropic API key.
* @param {string} params.modelId - The specific Anthropic model ID.
* @param {string} params.systemPrompt - The system prompt (optional).
* @param {string} params.userPrompt - The user prompt describing the desired object.
* @param {import('zod').ZodSchema} params.schema - The Zod schema for the object.
* @param {string} params.objectName - A name for the object/tool.
* @param {number} [params.maxTokens] - Maximum tokens for the response.
* @param {number} [params.temperature] - Temperature for generation.
* @param {number} [params.maxRetries] - Max retries for validation/generation.
* @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If generation or validation fails.
*/
export async function generateAnthropicObject({
apiKey,
modelId,
systemPrompt,
userPrompt,
schema,
objectName = 'generated_object', // Provide a default name
maxTokens,
temperature,
maxRetries = 3
}) {
log(
'debug',
`Generating Anthropic object ('${objectName}') with model: ${modelId}`
);
try {
const client = getClient(apiKey);
const result = await generateObject({
model: client(modelId),
mode: 'tool', // Anthropic generally uses 'tool' mode for structured output
schema: schema,
system: systemPrompt,
prompt: userPrompt,
tool: {
name: objectName, // Use the provided or default name
description: `Generate a ${objectName} based on the prompt.` // Simple description
},
maxTokens: maxTokens,
temperature: temperature,
maxRetries: maxRetries
});
log(
'debug',
`Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
);
return result.object;
} catch (error) {
log(
'error',
`Anthropic generateObject ('${objectName}') failed: ${error.message}`
);
throw error;
}
}
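
Illustrative call (not part of this commit): a Zod schema drives the structured output; assumes zod is already a dependency and apiKey is resolved elsewhere.

// Hypothetical structured-output request against a small schema.
import { z } from 'zod';

const subtaskSchema = z.object({
	title: z.string(),
	description: z.string()
});

const subtask = await generateAnthropicObject({
	apiKey,
	modelId: 'claude-3-7-sonnet-20250219',
	userPrompt: 'Propose one subtask for adopting the config getters.',
	schema: subtaskSchema,
	objectName: 'subtask'
});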
// TODO: Implement streamAnthropicObject if needed and supported well by the SDK for Anthropic.
// The basic structure would be similar to generateAnthropicObject but using streamObject.

View File

@@ -0,0 +1,176 @@
/**
* src/ai-providers/perplexity.js
*
* Implementation for interacting with Perplexity models
* using the Vercel AI SDK.
*/
import { createPerplexity } from '@ai-sdk/perplexity';
import { generateText, streamText, generateObject, streamObject } from 'ai';
import { log } from '../../scripts/modules/utils.js';
// --- Client Instantiation ---
// Similar to Anthropic, this expects the resolved API key to be passed in.
function getClient(apiKey) {
if (!apiKey) {
throw new Error('Perplexity API key is required.');
}
// Create and return a new instance directly
return createPerplexity({
apiKey: apiKey
});
}
// --- Standardized Service Function Implementations ---
/**
* Generates text using a Perplexity model.
*
* @param {object} params - Parameters for text generation.
* @param {string} params.apiKey - The Perplexity API key.
* @param {string} params.modelId - The Perplexity model ID (e.g., 'sonar-small-32k-online').
* @param {string} [params.systemPrompt] - The system prompt (optional for some models).
* @param {string} params.userPrompt - The user prompt.
* @param {number} [params.maxTokens] - Maximum tokens.
* @param {number} [params.temperature] - Temperature.
* @returns {Promise<string>} Generated text.
*/
export async function generatePerplexityText({
apiKey,
modelId,
systemPrompt,
userPrompt,
maxTokens,
temperature
}) {
log('debug', `Generating Perplexity text with model: ${modelId}`);
try {
const client = getClient(apiKey);
const result = await generateText({
model: client(modelId),
system: systemPrompt, // Pass system prompt if provided
prompt: userPrompt,
maxTokens: maxTokens,
temperature: temperature
});
log(
'debug',
`Perplexity generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
);
return result.text;
} catch (error) {
log('error', `Perplexity generateText failed: ${error.message}`);
throw error;
}
}
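
Illustrative call (not part of this commit), mirroring the Anthropic wrapper; assumes the key is resolved via resolveEnvVariable.

// Hypothetical research-style query through the Perplexity wrapper.
const answer = await generatePerplexityText({
	apiKey: resolveEnvVariable('PERPLEXITY_API_KEY', session),
	modelId: 'sonar-pro',
	userPrompt: 'Summarize current best practice for provider fallback chains.',
	maxTokens: 1024,
	temperature: 0.1
});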
/**
* Streams text using a Perplexity model.
*
* @param {object} params - Parameters for text streaming.
* @param {string} params.apiKey - The Perplexity API key.
* @param {string} params.modelId - The Perplexity model ID.
* @param {string} [params.systemPrompt] - The system prompt.
* @param {string} params.userPrompt - The user prompt.
* @param {number} [params.maxTokens] - Maximum tokens.
* @param {number} [params.temperature] - Temperature.
* @returns {Promise<ReadableStream<string>>} Stream of text deltas.
*/
export async function streamPerplexityText({
apiKey,
modelId,
systemPrompt,
userPrompt,
maxTokens,
temperature
}) {
log('debug', `Streaming Perplexity text with model: ${modelId}`);
try {
const client = getClient(apiKey);
const stream = await streamText({
model: client(modelId),
system: systemPrompt,
prompt: userPrompt,
maxTokens: maxTokens,
temperature: temperature
});
return stream.textStream;
} catch (error) {
log('error', `Perplexity streamText failed: ${error.message}`);
throw error;
}
}
/**
* Generates a structured object using a Perplexity model.
* Note: Perplexity's support for structured output/tool use might vary.
* We assume it follows OpenAI's function/tool calling conventions if supported by the SDK.
*
* @param {object} params - Parameters for object generation.
* @param {string} params.apiKey - The Perplexity API key.
* @param {string} params.modelId - The Perplexity model ID.
* @param {string} [params.systemPrompt] - System prompt.
* @param {string} params.userPrompt - User prompt.
* @param {import('zod').ZodSchema} params.schema - Zod schema.
* @param {string} params.objectName - Name for the object/tool.
* @param {number} [params.maxTokens] - Maximum tokens.
* @param {number} [params.temperature] - Temperature.
* @param {number} [params.maxRetries] - Max retries.
* @returns {Promise<object>} Generated object.
*/
export async function generatePerplexityObject({
apiKey,
modelId,
systemPrompt,
userPrompt,
schema,
objectName = 'generated_object',
maxTokens,
temperature,
maxRetries = 3
}) {
log(
'debug',
`Generating Perplexity object ('${objectName}') with model: ${modelId}`
);
try {
const client = getClient(apiKey);
// Assuming Perplexity follows OpenAI-like tool mode if supported by SDK
const result = await generateObject({
model: client(modelId),
mode: 'tool',
schema: schema,
system: systemPrompt,
prompt: userPrompt,
tool: {
name: objectName,
description: `Generate a ${objectName} based on the prompt.`
},
maxTokens: maxTokens,
temperature: temperature,
maxRetries: maxRetries
});
log(
'debug',
`Perplexity generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
);
return result.object;
} catch (error) {
log(
'error',
`Perplexity generateObject ('${objectName}') failed: ${error.message}`
);
// Check if the error indicates lack of tool support
if (
error.message.includes('tool use') ||
error.message.includes('structured output')
) {
log(
'warn',
`Model ${modelId} might not support structured output via tools.`
);
}
throw error;
}
}
// TODO: Implement streamPerplexityObject if needed and supported.

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,683 @@
import { jest } from '@jest/globals';
// Mock ai-client-factory
const mockGetClient = jest.fn();
jest.unstable_mockModule('../../scripts/modules/ai-client-factory.js', () => ({
getClient: mockGetClient
}));
// Mock AI SDK Core
const mockGenerateText = jest.fn();
jest.unstable_mockModule('ai', () => ({
generateText: mockGenerateText
// Mock other AI SDK functions like streamText as needed
}));
// Mock utils logger
const mockLog = jest.fn();
jest.unstable_mockModule('../../scripts/modules/utils.js', () => ({
log: mockLog
// Keep other exports if utils has more, otherwise just log
}));
// Import the module to test (AFTER mocks)
const { generateTextService } = await import(
'../../scripts/modules/ai-services-unified.js'
);
describe('Unified AI Services', () => {
beforeEach(() => {
// Clear mocks before each test
mockGetClient.mockClear();
mockGenerateText.mockClear();
mockLog.mockClear(); // Clear log mock
});
describe('generateTextService', () => {
test('should get client and call generateText with correct parameters', async () => {
const mockClient = { type: 'mock-client' };
mockGetClient.mockResolvedValue(mockClient);
mockGenerateText.mockResolvedValue({ text: 'Mock response' });
const serviceParams = {
role: 'main',
session: { env: { SOME_KEY: 'value' } }, // Example session
overrideOptions: { provider: 'override' }, // Example overrides
prompt: 'Test prompt',
// Other generateText options like maxTokens, temperature etc.
maxTokens: 100
};
const result = await generateTextService(serviceParams);
// Verify getClient call
expect(mockGetClient).toHaveBeenCalledTimes(1);
expect(mockGetClient).toHaveBeenCalledWith(
serviceParams.role,
serviceParams.session,
serviceParams.overrideOptions
);
// Verify generateText call
expect(mockGenerateText).toHaveBeenCalledTimes(1);
expect(mockGenerateText).toHaveBeenCalledWith({
model: mockClient, // Ensure the correct client is passed
prompt: serviceParams.prompt,
maxTokens: serviceParams.maxTokens
// Add other expected generateText options here
});
// Verify result
expect(result).toEqual({ text: 'Mock response' });
});
test('should retry generateText on specific errors and succeed', async () => {
const mockClient = { type: 'mock-client' };
mockGetClient.mockResolvedValue(mockClient);
// Simulate failure then success
mockGenerateText
.mockRejectedValueOnce(new Error('Rate limit exceeded')) // Retryable error
.mockRejectedValueOnce(new Error('Service temporarily unavailable')) // Retryable error
.mockResolvedValue({ text: 'Success after retries' });
const serviceParams = { role: 'main', prompt: 'Retry test' };
// Use jest.advanceTimersByTime for delays if implemented
// jest.useFakeTimers();
const result = await generateTextService(serviceParams);
expect(mockGetClient).toHaveBeenCalledTimes(1); // Client fetched once
expect(mockGenerateText).toHaveBeenCalledTimes(3); // Initial call + 2 retries
expect(result).toEqual({ text: 'Success after retries' });
// jest.useRealTimers(); // Restore real timers if faked
});
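
The tests above pin down the retry contract these mocks expect: three attempts per role, exponential backoff of 1s then 2s, and retries only on errors that look transient. A rough sketch of a helper consistent with those logs — an assumption about ai-services-unified.js internals, not its actual code:

// Hypothetical retry wrapper matching the behavior asserted in these tests.
function isRetryableError(error) {
	return /rate limit|temporarily unavailable|overloaded/i.test(error.message);
}

async function attemptWithRetries(fn, role, maxAttempts = 3) {
	for (let attempt = 1; attempt <= maxAttempts; attempt++) {
		try {
			log('info', `Attempt ${attempt}/${maxAttempts} calling generateText for role ${role}`);
			return await fn();
		} catch (error) {
			log('warn', `Attempt ${attempt} failed for role ${role}: ${error.message}`);
			if (attempt === maxAttempts || !isRetryableError(error)) throw error;
			const delaySeconds = 2 ** (attempt - 1); // 1s after attempt 1, 2s after attempt 2
			log('info', `Retryable error detected. Retrying in ${delaySeconds}s...`);
			await new Promise((resolve) => setTimeout(resolve, delaySeconds * 1000));
		}
	}
}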
test('should fail after exhausting retries', async () => {
jest.setTimeout(15000); // Increase timeout further
const mockClient = { type: 'mock-client' };
mockGetClient.mockResolvedValue(mockClient);
// Simulate persistent failure
mockGenerateText.mockRejectedValue(new Error('Rate limit exceeded'));
const serviceParams = { role: 'main', prompt: 'Retry failure test' };
await expect(generateTextService(serviceParams)).rejects.toThrow(
'Rate limit exceeded'
);
// Sequence is main -> fallback -> research. It tries all client gets even if main fails.
expect(mockGetClient).toHaveBeenCalledTimes(3);
expect(mockGenerateText).toHaveBeenCalledTimes(3); // Initial call + max retries (assuming 2 retries)
});
test('should not retry on non-retryable errors', async () => {
const mockMainClient = { type: 'mock-main' };
const mockFallbackClient = { type: 'mock-fallback' };
const mockResearchClient = { type: 'mock-research' };
// Simulate a non-retryable error
const nonRetryableError = new Error('Invalid request parameters');
mockGenerateText.mockRejectedValueOnce(nonRetryableError); // Fail only once
const serviceParams = { role: 'main', prompt: 'No retry test' };
// Sequence is main -> fallback -> research. Even if main fails non-retryably,
// it will still try to get clients for fallback and research before throwing.
// Let's assume getClient succeeds for all three.
mockGetClient
.mockResolvedValueOnce(mockMainClient)
.mockResolvedValueOnce(mockFallbackClient)
.mockResolvedValueOnce(mockResearchClient);
await expect(generateTextService(serviceParams)).rejects.toThrow(
'Invalid request parameters'
);
expect(mockGetClient).toHaveBeenCalledTimes(3); // Tries main, fallback, research
expect(mockGenerateText).toHaveBeenCalledTimes(1); // Called only once for main
});
test('should log service entry, client info, attempts, and success', async () => {
const mockClient = {
type: 'mock-client',
provider: 'test-provider',
model: 'test-model'
}; // Add mock details
mockGetClient.mockResolvedValue(mockClient);
mockGenerateText.mockResolvedValue({ text: 'Success' });
const serviceParams = { role: 'main', prompt: 'Log test' };
await generateTextService(serviceParams);
// Check logs (in order)
expect(mockLog).toHaveBeenNthCalledWith(
1,
'info',
'generateTextService called',
{ role: 'main' }
);
expect(mockLog).toHaveBeenNthCalledWith(
2,
'info',
'Attempting service call with role: main'
);
expect(mockLog).toHaveBeenNthCalledWith(
3,
'info',
'Retrieved AI client',
{
provider: mockClient.provider,
model: mockClient.model
}
);
expect(mockLog).toHaveBeenNthCalledWith(
4,
expect.stringMatching(
/Attempt 1\/3 calling generateText for role main/i
)
);
expect(mockLog).toHaveBeenNthCalledWith(
5,
'info',
'generateText succeeded for role main on attempt 1' // Original success log from helper
);
expect(mockLog).toHaveBeenNthCalledWith(
6,
'info',
'generateTextService succeeded using role: main' // Final success log from service
);
// Ensure no failure/retry logs were called
expect(mockLog).not.toHaveBeenCalledWith(
'warn',
expect.stringContaining('failed')
);
expect(mockLog).not.toHaveBeenCalledWith(
'info',
expect.stringContaining('Retrying')
);
});
test('should log retry attempts and eventual failure', async () => {
jest.setTimeout(15000); // Increase timeout further
const mockClient = {
type: 'mock-client',
provider: 'test-provider',
model: 'test-model'
};
const mockFallbackClient = { type: 'mock-fallback' };
const mockResearchClient = { type: 'mock-research' };
mockGetClient
.mockResolvedValueOnce(mockClient)
.mockResolvedValueOnce(mockFallbackClient)
.mockResolvedValueOnce(mockResearchClient);
mockGenerateText.mockRejectedValue(new Error('Rate limit'));
const serviceParams = { role: 'main', prompt: 'Log retry failure' };
await expect(generateTextService(serviceParams)).rejects.toThrow(
'Rate limit'
);
// Check logs
expect(mockLog).toHaveBeenCalledWith(
'info',
'generateTextService called',
{ role: 'main' }
);
expect(mockLog).toHaveBeenCalledWith(
'info',
'Attempting service call with role: main'
);
expect(mockLog).toHaveBeenCalledWith('info', 'Retrieved AI client', {
provider: mockClient.provider,
model: mockClient.model
});
expect(mockLog).toHaveBeenCalledWith(
expect.stringMatching(
/Attempt 1\/3 calling generateText for role main/i
)
);
expect(mockLog).toHaveBeenCalledWith(
'warn',
'Attempt 1 failed for role main: Rate limit'
);
expect(mockLog).toHaveBeenCalledWith(
'info',
'Retryable error detected. Retrying in 1s...'
);
expect(mockLog).toHaveBeenCalledWith(
expect.stringMatching(
/Attempt 2\/3 calling generateText for role main/i
)
);
expect(mockLog).toHaveBeenCalledWith(
'warn',
'Attempt 2 failed for role main: Rate limit'
);
expect(mockLog).toHaveBeenCalledWith(
'info',
'Retryable error detected. Retrying in 2s...'
);
expect(mockLog).toHaveBeenCalledWith(
expect.stringMatching(
/Attempt 3\/3 calling generateText for role main/i
)
);
expect(mockLog).toHaveBeenCalledWith(
'warn',
'Attempt 3 failed for role main: Rate limit'
);
expect(mockLog).toHaveBeenCalledWith(
'error',
'Non-retryable error or max retries reached for role main (generateText).'
);
// Check subsequent fallback attempts (which also fail)
expect(mockLog).toHaveBeenCalledWith(
'info',
'Attempting service call with role: fallback'
);
expect(mockLog).toHaveBeenCalledWith(
'error',
'Service call failed for role fallback: Rate limit'
);
expect(mockLog).toHaveBeenCalledWith(
'info',
'Attempting service call with role: research'
);
expect(mockLog).toHaveBeenCalledWith(
'error',
'Service call failed for role research: Rate limit'
);
expect(mockLog).toHaveBeenCalledWith(
'error',
'All roles in the sequence [main,fallback,research] failed.'
);
});
test('should use fallback client after primary fails, then succeed', async () => {
const mockMainClient = { type: 'mock-client', provider: 'main-provider' };
const mockFallbackClient = {
type: 'mock-client',
provider: 'fallback-provider'
};
// Setup calls: main client fails, fallback succeeds
mockGetClient
.mockResolvedValueOnce(mockMainClient) // First call for 'main' role
.mockResolvedValueOnce(mockFallbackClient); // Second call for 'fallback' role
mockGenerateText
.mockRejectedValueOnce(new Error('Main Rate limit')) // Main attempt 1 fail
.mockRejectedValueOnce(new Error('Main Rate limit')) // Main attempt 2 fail
.mockRejectedValueOnce(new Error('Main Rate limit')) // Main attempt 3 fail
.mockResolvedValue({ text: 'Fallback success' }); // Fallback attempt 1 success
const serviceParams = { role: 'main', prompt: 'Fallback test' };
const result = await generateTextService(serviceParams);
// Check calls
expect(mockGetClient).toHaveBeenCalledTimes(2);
expect(mockGetClient).toHaveBeenNthCalledWith(
1,
'main',
undefined,
undefined
);
expect(mockGetClient).toHaveBeenNthCalledWith(
2,
'fallback',
undefined,
undefined
);
expect(mockGenerateText).toHaveBeenCalledTimes(4); // 3 main fails, 1 fallback success
expect(mockGenerateText).toHaveBeenNthCalledWith(4, {
model: mockFallbackClient,
prompt: 'Fallback test'
});
expect(result).toEqual({ text: 'Fallback success' });
// Check logs for fallback attempt
expect(mockLog).toHaveBeenCalledWith(
'error',
'Service call failed for role main: Main Rate limit'
);
expect(mockLog).toHaveBeenCalledWith(
'warn',
'Retries exhausted or non-retryable error for role main, trying next role in sequence...'
);
expect(mockLog).toHaveBeenCalledWith(
'info',
'Attempting service call with role: fallback'
);
expect(mockLog).toHaveBeenCalledWith(
'info',
'generateTextService succeeded using role: fallback'
);
});
test('should use research client after primary and fallback fail, then succeed', async () => {
const mockMainClient = { type: 'mock-client', provider: 'main-provider' };
const mockFallbackClient = {
type: 'mock-client',
provider: 'fallback-provider'
};
const mockResearchClient = {
type: 'mock-client',
provider: 'research-provider'
};
// Setup calls: main fails, fallback fails, research succeeds
mockGetClient
.mockResolvedValueOnce(mockMainClient)
.mockResolvedValueOnce(mockFallbackClient)
.mockResolvedValueOnce(mockResearchClient);
mockGenerateText
.mockRejectedValueOnce(new Error('Main fail 1')) // Main 1
.mockRejectedValueOnce(new Error('Main fail 2')) // Main 2
.mockRejectedValueOnce(new Error('Main fail 3')) // Main 3
.mockRejectedValueOnce(new Error('Fallback fail 1')) // Fallback 1
.mockRejectedValueOnce(new Error('Fallback fail 2')) // Fallback 2
.mockRejectedValueOnce(new Error('Fallback fail 3')) // Fallback 3
.mockResolvedValue({ text: 'Research success' }); // Research 1 success
const serviceParams = { role: 'main', prompt: 'Research fallback test' };
const result = await generateTextService(serviceParams);
// Check calls
expect(mockGetClient).toHaveBeenCalledTimes(3);
expect(mockGetClient).toHaveBeenNthCalledWith(
1,
'main',
undefined,
undefined
);
expect(mockGetClient).toHaveBeenNthCalledWith(
2,
'fallback',
undefined,
undefined
);
expect(mockGetClient).toHaveBeenNthCalledWith(
3,
'research',
undefined,
undefined
);
expect(mockGenerateText).toHaveBeenCalledTimes(7); // 3 main, 3 fallback, 1 research
expect(mockGenerateText).toHaveBeenNthCalledWith(7, {
model: mockResearchClient,
prompt: 'Research fallback test'
});
expect(result).toEqual({ text: 'Research success' });
// Check logs for fallback attempt
expect(mockLog).toHaveBeenCalledWith(
'error',
'Service call failed for role main: Main fail 3' // Error from last attempt for role
);
expect(mockLog).toHaveBeenCalledWith(
'warn',
'Retries exhausted or non-retryable error for role main, trying next role in sequence...'
);
expect(mockLog).toHaveBeenCalledWith(
'error',
'Service call failed for role fallback: Fallback fail 3' // Error from last attempt for role
);
expect(mockLog).toHaveBeenCalledWith(
'warn',
'Retries exhausted or non-retryable error for role fallback, trying next role in sequence...'
);
expect(mockLog).toHaveBeenCalledWith(
'info',
'Attempting service call with role: research'
);
expect(mockLog).toHaveBeenCalledWith(
'info',
'generateTextService succeeded using role: research'
);
});
test('should fail if primary, fallback, and research clients all fail', async () => {
const mockMainClient = { type: 'mock-client', provider: 'main' };
const mockFallbackClient = { type: 'mock-client', provider: 'fallback' };
const mockResearchClient = { type: 'mock-client', provider: 'research' };
// Setup calls: all fail
mockGetClient
.mockResolvedValueOnce(mockMainClient)
.mockResolvedValueOnce(mockFallbackClient)
.mockResolvedValueOnce(mockResearchClient);
mockGenerateText
.mockRejectedValueOnce(new Error('Main fail 1'))
.mockRejectedValueOnce(new Error('Main fail 2'))
.mockRejectedValueOnce(new Error('Main fail 3'))
.mockRejectedValueOnce(new Error('Fallback fail 1'))
.mockRejectedValueOnce(new Error('Fallback fail 2'))
.mockRejectedValueOnce(new Error('Fallback fail 3'))
.mockRejectedValueOnce(new Error('Research fail 1'))
.mockRejectedValueOnce(new Error('Research fail 2'))
.mockRejectedValueOnce(new Error('Research fail 3')); // Last error
const serviceParams = { role: 'main', prompt: 'All fail test' };
await expect(generateTextService(serviceParams)).rejects.toThrow(
'Research fail 3' // Should throw the error from the LAST failed attempt
);
// Check calls
expect(mockGetClient).toHaveBeenCalledTimes(3);
expect(mockGenerateText).toHaveBeenCalledTimes(9); // 3 for each role
expect(mockLog).toHaveBeenCalledWith(
'error',
'All roles in the sequence [main,fallback,research] failed.'
);
});
test('should handle error getting fallback client', async () => {
const mockMainClient = { type: 'mock-client', provider: 'main' };
// Setup calls: main fails, getting fallback client fails, research succeeds (to test sequence)
const mockResearchClient = { type: 'mock-client', provider: 'research' };
mockGetClient
.mockResolvedValueOnce(mockMainClient)
.mockRejectedValueOnce(new Error('Cannot get fallback client'))
.mockResolvedValueOnce(mockResearchClient);
mockGenerateText
.mockRejectedValueOnce(new Error('Main fail 1'))
.mockRejectedValueOnce(new Error('Main fail 2'))
.mockRejectedValueOnce(new Error('Main fail 3')) // Main fails 3 times
.mockResolvedValue({ text: 'Research success' }); // Research succeeds on its 1st attempt
const serviceParams = { role: 'main', prompt: 'Fallback client error' };
// Should eventually succeed with research after main+fallback fail
const result = await generateTextService(serviceParams);
expect(result).toEqual({ text: 'Research success' });
expect(mockGetClient).toHaveBeenCalledTimes(3); // Tries main, fallback (fails), research
expect(mockGenerateText).toHaveBeenCalledTimes(4); // 3 main attempts, 1 research attempt
expect(mockLog).toHaveBeenCalledWith(
'error',
'Service call failed for role fallback: Cannot get fallback client'
);
expect(mockLog).toHaveBeenCalledWith(
'warn',
'Could not get client for role fallback, trying next role in sequence...'
);
expect(mockLog).toHaveBeenCalledWith(
'info',
'Attempting service call with role: research'
);
expect(mockLog).toHaveBeenCalledWith(
'info',
expect.stringContaining(
'generateTextService succeeded using role: research'
)
);
});
		test('should try research after fallback fails if initial role is fallback', async () => {
			const mockFallbackClient = { type: 'mock-client', provider: 'fallback' };
			const mockResearchClient = { type: 'mock-client', provider: 'research' };
			mockGetClient
				.mockResolvedValueOnce(mockFallbackClient)
				.mockResolvedValueOnce(mockResearchClient);
			mockGenerateText
				.mockRejectedValueOnce(new Error('Fallback fail 1')) // Fallback 1
				.mockRejectedValueOnce(new Error('Fallback fail 2')) // Fallback 2
				.mockRejectedValueOnce(new Error('Fallback fail 3')) // Fallback 3
				.mockResolvedValue({ text: 'Research success' }); // Research 1
			const serviceParams = { role: 'fallback', prompt: 'Start with fallback' };
			const result = await generateTextService(serviceParams);
			expect(mockGetClient).toHaveBeenCalledTimes(2); // Fallback, Research
			expect(mockGetClient).toHaveBeenNthCalledWith(
				1,
				'fallback',
				undefined,
				undefined
			);
			expect(mockGetClient).toHaveBeenNthCalledWith(
				2,
				'research',
				undefined,
				undefined
			);
			expect(mockGenerateText).toHaveBeenCalledTimes(4); // 3 fallback, 1 research
			expect(result).toEqual({ text: 'Research success' });
			// Check logs for sequence
			expect(mockLog).toHaveBeenCalledWith(
				'info',
				'Attempting service call with role: fallback'
			);
			expect(mockLog).toHaveBeenCalledWith(
				'error',
				'Service call failed for role fallback: Fallback fail 3'
			);
			expect(mockLog).toHaveBeenCalledWith(
				'warn',
				expect.stringContaining(
					'Retries exhausted or non-retryable error for role fallback'
				)
			);
			expect(mockLog).toHaveBeenCalledWith(
				'info',
				'Attempting service call with role: research'
			);
			expect(mockLog).toHaveBeenCalledWith(
				'info',
				expect.stringContaining(
					'generateTextService succeeded using role: research'
				)
			);
		});
		test('should try fallback after research fails if initial role is research', async () => {
			const mockResearchClient = { type: 'mock-client', provider: 'research' };
			const mockFallbackClient = { type: 'mock-client', provider: 'fallback' };
			mockGetClient
				.mockResolvedValueOnce(mockResearchClient)
				.mockResolvedValueOnce(mockFallbackClient);
			mockGenerateText
				.mockRejectedValueOnce(new Error('Research fail 1')) // Research 1
				.mockRejectedValueOnce(new Error('Research fail 2')) // Research 2
				.mockRejectedValueOnce(new Error('Research fail 3')) // Research 3
				.mockResolvedValue({ text: 'Fallback success' }); // Fallback 1
			const serviceParams = { role: 'research', prompt: 'Start with research' };
			const result = await generateTextService(serviceParams);
			expect(mockGetClient).toHaveBeenCalledTimes(2); // Research, Fallback
			expect(mockGetClient).toHaveBeenNthCalledWith(
				1,
				'research',
				undefined,
				undefined
			);
			expect(mockGetClient).toHaveBeenNthCalledWith(
				2,
				'fallback',
				undefined,
				undefined
			);
			expect(mockGenerateText).toHaveBeenCalledTimes(4); // 3 research, 1 fallback
			expect(result).toEqual({ text: 'Fallback success' });
			// Check logs for sequence
			expect(mockLog).toHaveBeenCalledWith(
				'info',
				'Attempting service call with role: research'
			);
			expect(mockLog).toHaveBeenCalledWith(
				'error',
				'Service call failed for role research: Research fail 3'
			);
			expect(mockLog).toHaveBeenCalledWith(
				'warn',
				expect.stringContaining(
					'Retries exhausted or non-retryable error for role research'
				)
			);
			expect(mockLog).toHaveBeenCalledWith(
				'info',
				'Attempting service call with role: fallback'
			);
			expect(mockLog).toHaveBeenCalledWith(
				'info',
				expect.stringContaining(
					'generateTextService succeeded using role: fallback'
				)
			);
		});
		test('should use default sequence and log warning for unknown initial role', async () => {
			const mockMainClient = { type: 'mock-client', provider: 'main' };
			const mockFallbackClient = { type: 'mock-client', provider: 'fallback' };
			mockGetClient
				.mockResolvedValueOnce(mockMainClient)
				.mockResolvedValueOnce(mockFallbackClient);
			mockGenerateText
				.mockRejectedValueOnce(new Error('Main fail 1')) // Main 1
				.mockRejectedValueOnce(new Error('Main fail 2')) // Main 2
				.mockRejectedValueOnce(new Error('Main fail 3')) // Main 3
				.mockResolvedValue({ text: 'Fallback success' }); // Fallback 1
			const serviceParams = {
				role: 'invalid-role',
				prompt: 'Unknown role test'
			};
			const result = await generateTextService(serviceParams);
			// Check warning log for unknown role
			expect(mockLog).toHaveBeenCalledWith(
				'warn',
				'Unknown initial role: invalid-role. Defaulting to main -> fallback -> research sequence.'
			);
			// Check it followed the default main -> fallback sequence
			expect(mockGetClient).toHaveBeenCalledTimes(2); // Main, Fallback
			expect(mockGetClient).toHaveBeenNthCalledWith(
				1,
				'main',
				undefined,
				undefined
			);
			expect(mockGetClient).toHaveBeenNthCalledWith(
				2,
				'fallback',
				undefined,
				undefined
			);
			expect(mockGenerateText).toHaveBeenCalledTimes(4); // 3 main, 1 fallback
			expect(result).toEqual({ text: 'Fallback success' });
		});
	});
});
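
// Illustrative sketch (an assumption, not the shipped implementation): the
// expectations above imply a role-sequence loop with a fixed number of retries
// per role. The names `generateTextServiceSketch`, `deps`, and
// `RETRIES_PER_ROLE` are hypothetical stand-ins chosen for this example; only
// the role sequences, the retry count, and the log messages are taken from the
// tests themselves.
const RETRIES_PER_ROLE = 3;

async function generateTextServiceSketch({ role, ...params }, deps, log) {
	const sequences = {
		main: ['main', 'fallback', 'research'],
		fallback: ['fallback', 'research'],
		research: ['research', 'fallback']
	};
	let sequence = sequences[role];
	if (!sequence) {
		log(
			'warn',
			`Unknown initial role: ${role}. Defaulting to main -> fallback -> research sequence.`
		);
		sequence = sequences.main;
	}
	let lastError;
	for (const currentRole of sequence) {
		log('info', `Attempting service call with role: ${currentRole}`);
		let client;
		try {
			// Client construction can itself fail (e.g. a missing API key)...
			client = await deps.getClient(currentRole, undefined, undefined);
		} catch (err) {
			lastError = err;
			log(
				'error',
				`Service call failed for role ${currentRole}: ${err.message}`
			);
			log(
				'warn',
				`Could not get client for role ${currentRole}, trying next role in sequence...`
			);
			continue; // ...in which case the next role is tried immediately
		}
		// Retry the actual generation call a fixed number of times per role
		for (let attempt = 1; attempt <= RETRIES_PER_ROLE; attempt++) {
			try {
				const result = await deps.generateText(client, params);
				log(
					'info',
					`generateTextService succeeded using role: ${currentRole}`
				);
				return result;
			} catch (err) {
				lastError = err;
			}
		}
		log(
			'error',
			`Service call failed for role ${currentRole}: ${lastError.message}`
		);
		log(
			'warn',
			`Retries exhausted or non-retryable error for role ${currentRole}, trying next role in sequence...`
		);
	}
	log('error', `All roles in the sequence [${sequence.join(',')}] failed.`);
	throw lastError; // e.g. 'Research fail 3' in the all-fail test above
}

// Hypothetical usage, mirroring what the tests exercise through their mocks:
// await generateTextServiceSketch(
//   { role: 'main', prompt: 'All fail test' },
//   { getClient: mockGetClient, generateText: mockGenerateText },
//   mockLog
// );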