feat(config): Implement new config system and resolve refactoring errors Introduced config-manager.js and new utilities (resolveEnvVariable, findProjectRoot). Removed old global CONFIG object from utils.js. Updated .taskmasterconfig, mcp.json, and .env.example. Added generateComplexityAnalysisPrompt to ui.js. Removed unused updateSubtaskById from task-manager.js. Resolved SyntaxError and ReferenceError issues across commands.js, ui.js, task-manager.js, and ai-services.js by replacing CONFIG references with config-manager getters (getDebugFlag, getProjectName, getDefaultSubtasks, isApiKeySet). Refactored 'models' command to use getConfig/writeConfig. Simplified version checking. This stabilizes the codebase after initial Task 61 refactoring, fixing CLI errors and enabling subsequent work on Subtasks 61.34 and 61.35.

This commit is contained in:
Eyal Toledano
2025-04-20 01:09:30 -04:00
parent 11b8d1bda5
commit 538b874582
16 changed files with 3454 additions and 797 deletions

View File

@@ -0,0 +1,368 @@
/**
* ai-services-unified.js
* Centralized AI service layer using ai-client-factory and AI SDK core functions.
*/
import { generateText, streamText, generateObject } from 'ai';
import { getClient } from './ai-client-factory.js';
import { log } from './utils.js'; // Import log for retry logging
// NOTE: `log` is already imported above from './utils.js'; no deferred import is needed.
// --- Configuration for Retries ---
const MAX_RETRIES = 2; // Total attempts = 1 + MAX_RETRIES (i.e. up to 3 calls per role)
const INITIAL_RETRY_DELAY_MS = 1000; // 1 second; doubled on each subsequent retry (exponential backoff)
/**
 * Determines whether a failed AI API call is worth retrying.
 *
 * @param {Error} error - The error thrown by the AI SDK call.
 * @returns {boolean} True when the error looks transient: a known transient
 *   message (rate limit, overload, timeout, network issue, temporary
 *   unavailability), HTTP 429, or any 5xx status.
 */
function isRetryableError(error) {
	const message = error.message?.toLowerCase() || '';
	// Common transient failure patterns seen in provider error messages.
	const transientPatterns = [
		'rate limit',
		'overloaded',
		'service temporarily unavailable',
		'timeout',
		'network error'
	];
	if (transientPatterns.some((pattern) => message.includes(pattern))) {
		return true;
	}
	// Retry on Too Many Requests (429) and all server-side (5xx) statuses.
	return error.status === 429 || error.status >= 500;
}
/**
 * Internal helper to attempt an AI SDK API call with retries.
 * Uses exponential backoff (1s, 2s, ...) between retryable failures.
 *
 * @param {object} client - The AI client instance.
 * @param {function} apiCallFn - The AI SDK function to call (e.g., generateText).
 * @param {object} apiParams - Parameters for the AI SDK function (excluding model).
 * @param {string} attemptRole - The role being attempted (for logging).
 * @returns {Promise<object>} The result from the successful API call.
 * @throws {Error} If the call fails after all retries.
 */
async function _attemptApiCallWithRetries(
	client,
	apiCallFn,
	apiParams,
	attemptRole
) {
	for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) {
		try {
			log(
				'info',
				`Attempt ${attempt + 1}/${MAX_RETRIES + 1} calling ${apiCallFn.name} for role ${attemptRole}`
			);
			// Invoke the provided AI SDK function (generateText, streamText, etc.)
			const result = await apiCallFn({ model: client, ...apiParams });
			log(
				'info',
				`${apiCallFn.name} succeeded for role ${attemptRole} on attempt ${attempt + 1}`
			);
			return result;
		} catch (error) {
			log(
				'warn',
				`Attempt ${attempt + 1} failed for role ${attemptRole} (${apiCallFn.name}): ${error.message}`
			);
			const canRetry = isRetryableError(error) && attempt < MAX_RETRIES;
			if (!canRetry) {
				log(
					'error',
					`Non-retryable error or max retries reached for role ${attemptRole} (${apiCallFn.name}).`
				);
				throw error; // Final failure for this attempt chain
			}
			// Backoff: 1s after the first failure, 2s after the second, ...
			const delay = INITIAL_RETRY_DELAY_MS * 2 ** attempt;
			log('info', `Retryable error detected. Retrying in ${delay / 1000}s...`);
			await new Promise((resolve) => setTimeout(resolve, delay));
		}
	}
	// Unreachable in practice (the loop always returns or throws); kept for safety.
	throw new Error(
		`Exhausted all retries for role ${attemptRole} (${apiCallFn.name})`
	);
}
/**
 * Unified service function for generating text.
 * Handles client retrieval, retries, and fallback (main -> fallback -> research).
 *
 * @param {object} params - Parameters for the service call.
 * @param {string} params.role - The initial client role ('main', 'research', 'fallback').
 * @param {object} [params.session=null] - Optional MCP session object.
 * @param {object} [params.overrideOptions={}] - Optional overrides for ai-client-factory { provider, modelId }.
 * @param {string} params.prompt - The prompt for the AI.
 * @param {number} [params.maxTokens] - Max tokens for the generation.
 * @param {number} [params.temperature] - Temperature setting.
 * @returns {Promise<object>} The result from the AI SDK's generateText function.
 */
async function generateTextService(params) {
	const {
		role: initialRole,
		session,
		overrideOptions,
		...generateTextParams // Remaining params are forwarded to generateText
	} = params;
	log('info', 'generateTextService called', { role: initialRole });

	// Fallback order for each recognized starting role.
	const roleSequences = {
		main: ['main', 'fallback', 'research'],
		fallback: ['fallback', 'research'],
		research: ['research', 'fallback']
	};
	let sequence = roleSequences[initialRole];
	if (!sequence) {
		log(
			'warn',
			`Unknown initial role: ${initialRole}. Defaulting to main -> fallback -> research sequence.`
		);
		sequence = roleSequences.main;
	}

	let lastError = null;
	// Try each role in order until one succeeds.
	for (const currentRole of sequence) {
		log('info', `Attempting service call with role: ${currentRole}`);
		let client = null;
		try {
			client = await getClient(currentRole, session, overrideOptions);
			log('info', 'Retrieved AI client', {
				provider: client?.provider || 'unknown',
				model: client?.modelId || client?.model || 'unknown'
			});
			// Attempt the API call with retries using the shared helper.
			const result = await _attemptApiCallWithRetries(
				client,
				generateText,
				generateTextParams,
				currentRole
			);
			log('info', `generateTextService succeeded using role: ${currentRole}`);
			return result;
		} catch (error) {
			log(
				'error',
				`Service call failed for role ${currentRole}: ${error.message}`
			);
			lastError = error; // Thrown later if every role in the sequence fails
			// Explain why we are moving on to the next role.
			const reason = client
				? `Retries exhausted or non-retryable error for role ${currentRole}`
				: `Could not get client for role ${currentRole}`;
			log('warn', `${reason}, trying next role in sequence...`);
		}
	}

	// All roles in the sequence failed.
	log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`);
	throw (
		lastError ||
		new Error(
			'AI service call failed for all configured roles in the sequence.'
		)
	);
}
// Additional unified service functions (streamTextService, generateObjectService) follow.
/**
 * Unified service function for streaming text.
 * Handles client retrieval, retries, and fallback sequence
 * (main -> fallback -> research, depending on the starting role).
 * Uses the `streamText` function from the 'ai' SDK import at the top of the file.
 *
 * @param {object} params - Parameters for the service call.
 * @param {string} params.role - The initial client role ('main', 'research', 'fallback').
 * @param {object} [params.session=null] - Optional MCP session object.
 * @param {object} [params.overrideOptions={}] - Optional overrides for ai-client-factory.
 * @param {string} params.prompt - The prompt for the AI.
 * @returns {Promise<object>} The result from the AI SDK's streamText function (typically a Streamable object).
 * @throws {Error} The last error encountered if every role in the sequence fails.
 */
async function streamTextService(params) {
	const {
		role: initialRole,
		session,
		overrideOptions,
		...streamTextParams // Collect remaining params for streamText
	} = params;
	log('info', 'streamTextService called', { role: initialRole });

	// Determine the fallback order based on the starting role.
	let sequence;
	switch (initialRole) {
		case 'main':
			sequence = ['main', 'fallback', 'research'];
			break;
		case 'fallback':
			sequence = ['fallback', 'research'];
			break;
		case 'research':
			sequence = ['research', 'fallback'];
			break;
		default:
			log(
				'warn',
				`Unknown initial role: ${initialRole}. Defaulting to main -> fallback -> research sequence.`
			);
			sequence = ['main', 'fallback', 'research'];
	}

	let lastError = null;
	for (const currentRole of sequence) {
		log('info', `Attempting service call with role: ${currentRole}`);
		let client = null;
		try {
			client = await getClient(currentRole, session, overrideOptions);
			log('info', 'Retrieved AI client', {
				provider: client?.provider || 'unknown',
				model: client?.modelId || client?.model || 'unknown'
			});
			// streamText must be imported from 'ai' alongside generateText;
			// the shared retry helper handles backoff and logging.
			const result = await _attemptApiCallWithRetries(
				client,
				streamText,
				streamTextParams,
				currentRole
			);
			log('info', `streamTextService succeeded using role: ${currentRole}`);
			return result;
		} catch (error) {
			log(
				'error',
				`Service call failed for role ${currentRole}: ${error.message}`
			);
			lastError = error; // Thrown later if every role fails
			// Explain why we are moving on to the next role.
			if (client) {
				log(
					'warn',
					`Retries exhausted or non-retryable error for role ${currentRole}, trying next role in sequence...`
				);
			} else {
				log(
					'warn',
					`Could not get client for role ${currentRole}, trying next role in sequence...`
				);
			}
		}
	}

	log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`);
	throw (
		lastError ||
		new Error(
			'AI service call (streamText) failed for all configured roles in the sequence.'
		)
	);
}
/**
 * Unified service function for generating structured objects.
 * Handles client retrieval, retries, and fallback sequence
 * (main -> fallback -> research, depending on the starting role).
 * Uses the `generateObject` function from the 'ai' SDK import at the top of the file.
 *
 * @param {object} params - Parameters for the service call.
 * @param {string} params.role - The initial client role ('main', 'research', 'fallback').
 * @param {object} [params.session=null] - Optional MCP session object.
 * @param {object} [params.overrideOptions={}] - Optional overrides for ai-client-factory.
 * @param {z.Schema} params.schema - The Zod schema for the expected object.
 * @param {string} params.prompt - The prompt for the AI.
 * @returns {Promise<object>} The result from the AI SDK's generateObject function.
 * @throws {Error} The last error encountered if every role in the sequence fails.
 */
async function generateObjectService(params) {
	const {
		role: initialRole,
		session,
		overrideOptions,
		...generateObjectParams // Collect remaining params for generateObject
	} = params;
	log('info', 'generateObjectService called', { role: initialRole });

	// Determine the fallback order based on the starting role.
	let sequence;
	switch (initialRole) {
		case 'main':
			sequence = ['main', 'fallback', 'research'];
			break;
		case 'fallback':
			sequence = ['fallback', 'research'];
			break;
		case 'research':
			sequence = ['research', 'fallback'];
			break;
		default:
			log(
				'warn',
				`Unknown initial role: ${initialRole}. Defaulting to main -> fallback -> research sequence.`
			);
			sequence = ['main', 'fallback', 'research'];
	}

	let lastError = null;
	for (const currentRole of sequence) {
		log('info', `Attempting service call with role: ${currentRole}`);
		let client = null;
		try {
			client = await getClient(currentRole, session, overrideOptions);
			log('info', 'Retrieved AI client', {
				provider: client?.provider || 'unknown',
				model: client?.modelId || client?.model || 'unknown'
			});
			// generateObject must be imported from 'ai' alongside generateText;
			// the shared retry helper handles backoff and logging.
			const result = await _attemptApiCallWithRetries(
				client,
				generateObject,
				generateObjectParams,
				currentRole
			);
			log('info', `generateObjectService succeeded using role: ${currentRole}`);
			return result;
		} catch (error) {
			log(
				'error',
				`Service call failed for role ${currentRole}: ${error.message}`
			);
			lastError = error; // Thrown later if every role fails
			// Explain why we are moving on to the next role.
			if (client) {
				log(
					'warn',
					`Retries exhausted or non-retryable error for role ${currentRole}, trying next role in sequence...`
				);
			} else {
				log(
					'warn',
					`Could not get client for role ${currentRole}, trying next role in sequence...`
				);
			}
		}
	}

	log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`);
	throw (
		lastError ||
		new Error(
			'AI service call (generateObject) failed for all configured roles in the sequence.'
		)
	);
}
export { generateTextService, streamTextService, generateObjectService };

View File

@@ -8,9 +8,18 @@
import { Anthropic } from '@anthropic-ai/sdk';
import OpenAI from 'openai';
import dotenv from 'dotenv';
import { CONFIG, log, sanitizePrompt, isSilentMode } from './utils.js';
import { log, sanitizePrompt, isSilentMode } from './utils.js';
import { startLoadingIndicator, stopLoadingIndicator } from './ui.js';
import chalk from 'chalk';
import {
getMainModelId,
getMainMaxTokens,
getMainTemperature,
getDebugFlag,
getResearchModelId,
getResearchMaxTokens,
getResearchTemperature
} from './config-manager.js';
// Load environment variables
dotenv.config();
@@ -218,7 +227,7 @@ Important: Your response must be valid JSON only, with no additional explanation
prdContent,
prdPath,
numTasks,
modelConfig?.maxTokens || CONFIG.maxTokens,
modelConfig?.maxTokens || getMainMaxTokens(null),
systemPrompt,
{ reportProgress, mcpLog, session },
aiClient || anthropic,
@@ -254,7 +263,7 @@ Important: Your response must be valid JSON only, with no additional explanation
);
} else {
console.error(chalk.red(userMessage));
if (CONFIG.debug) {
if (getDebugFlag(null)) {
log('debug', 'Full error:', error);
}
throw new Error(userMessage);
@@ -287,54 +296,46 @@ async function handleStreamingRequest(
aiClient = null,
modelConfig = null
) {
// Determine output format based on mcpLog presence
const outputFormat = mcpLog ? 'json' : 'text';
// Create custom reporter that checks for MCP log and silent mode
const report = (message, level = 'info') => {
if (mcpLog) {
if (mcpLog && typeof mcpLog[level] === 'function') {
mcpLog[level](message);
} else if (!isSilentMode() && outputFormat === 'text') {
// Only log to console if not in silent mode and outputFormat is 'text'
} else if (!isSilentMode()) {
log(level, message);
}
};
// Only show loading indicators for text output (CLI)
let loadingIndicator = null;
if (outputFormat === 'text' && !isSilentMode()) {
loadingIndicator = startLoadingIndicator('Generating tasks from PRD...');
let loadingIndicator;
if (!isSilentMode() && !mcpLog) {
loadingIndicator = startLoadingIndicator('Claude is thinking...');
}
if (reportProgress) {
await reportProgress({ progress: 0 });
}
let responseText = '';
let streamingInterval = null;
let textContent = '';
let finalResponse = null;
let claudeOverloaded = false;
try {
// Use streaming for handling large responses
const stream = await (aiClient || anthropic).messages.create({
model:
modelConfig?.model || session?.env?.ANTHROPIC_MODEL || CONFIG.model,
max_tokens:
modelConfig?.maxTokens || session?.env?.MAX_TOKENS || maxTokens,
temperature:
modelConfig?.temperature ||
session?.env?.TEMPERATURE ||
CONFIG.temperature,
const modelToUse = modelConfig?.modelId || getMainModelId(null);
const temperatureToUse =
modelConfig?.temperature || getMainTemperature(null);
const clientToUse = aiClient || anthropic;
report(`Using model: ${modelToUse} with temp: ${temperatureToUse}`);
const stream = await clientToUse.messages.stream({
model: modelToUse,
max_tokens: maxTokens,
temperature: temperatureToUse,
system: systemPrompt,
messages: [
{
role: 'user',
content: `Here's the Product Requirements Document (PRD) to break down into ${numTasks} tasks:\n\n${prdContent}`
}
],
stream: true
]
});
// Update loading indicator to show streaming progress - only for text output
if (outputFormat === 'text' && !isSilentMode()) {
let streamingInterval = null;
if (!isSilentMode() && process.stdout.isTTY) {
let dotCount = 0;
const readline = await import('readline');
streamingInterval = setInterval(() => {
@@ -346,64 +347,76 @@ async function handleStreamingRequest(
}, 500);
}
// Process the stream
for await (const chunk of stream) {
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
responseText += chunk.delta.text;
textContent += chunk.delta.text;
}
if (reportProgress) {
await reportProgress({
progress: (responseText.length / maxTokens) * 100
progress: (textContent.length / maxTokens) * 100
});
}
if (mcpLog) {
mcpLog.info(`Progress: ${(responseText.length / maxTokens) * 100}%`);
mcpLog.info(`Progress: ${(textContent.length / maxTokens) * 100}%`);
}
}
if (streamingInterval) clearInterval(streamingInterval);
// Only call stopLoadingIndicator if we started one
if (loadingIndicator && outputFormat === 'text' && !isSilentMode()) {
stopLoadingIndicator(loadingIndicator);
if (loadingIndicator) {
stopLoadingIndicator(
loadingIndicator,
'Claude processing finished',
true
);
loadingIndicator = null;
}
report(
`Completed streaming response from ${aiClient ? 'provided' : 'default'} AI client!`,
'info'
);
// Pass options to processClaudeResponse
return processClaudeResponse(
responseText,
finalResponse = processClaudeResponse(
textContent,
numTasks,
0,
prdContent,
prdPath,
{ reportProgress, mcpLog, session }
);
if (claudeOverloaded) {
report('Claude is overloaded, falling back to Perplexity', 'warn');
const perplexityClient = getPerplexityClient();
finalResponse = await handleStreamingRequest(
prdContent,
prdPath,
numTasks,
maxTokens,
systemPrompt,
{ reportProgress, mcpLog, session },
perplexityClient,
modelConfig
);
}
return finalResponse;
} catch (error) {
if (streamingInterval) clearInterval(streamingInterval);
// Only call stopLoadingIndicator if we started one
if (loadingIndicator && outputFormat === 'text' && !isSilentMode()) {
stopLoadingIndicator(loadingIndicator);
if (loadingIndicator) {
stopLoadingIndicator(loadingIndicator, 'Claude stream failed', false);
loadingIndicator = null;
}
// Get user-friendly error message
if (error.error?.type === 'overloaded_error') {
claudeOverloaded = true;
}
const userMessage = handleClaudeError(error);
report(`Error: ${userMessage}`, 'error');
report(userMessage, 'error');
// Only show console error for text output (CLI)
if (outputFormat === 'text' && !isSilentMode()) {
console.error(chalk.red(userMessage));
throw error;
} finally {
if (loadingIndicator) {
const success = !!finalResponse;
const message = success
? 'Claude stream finished'
: 'Claude stream ended';
stopLoadingIndicator(loadingIndicator, message, success);
}
if (CONFIG.debug && outputFormat === 'text' && !isSilentMode()) {
log('debug', 'Full error:', error);
}
throw new Error(userMessage);
}
}
@@ -528,18 +541,27 @@ async function generateSubtasks(
additionalContext = '',
{ reportProgress, mcpLog, session } = {}
) {
log('info', `Generating ${numSubtasks} subtasks for Task ${task.id}...`);
const report = (message, level = 'info') => {
if (mcpLog && typeof mcpLog[level] === 'function') {
mcpLog[level](message);
} else if (!isSilentMode()) {
log(level, message);
}
};
let loadingIndicator;
if (!isSilentMode() && !mcpLog) {
loadingIndicator = startLoadingIndicator(
'Claude is generating subtasks...'
);
}
const model = getMainModelId(null);
const maxTokens = getMainMaxTokens(null);
const temperature = getMainTemperature(null);
try {
log(
'info',
`Generating ${numSubtasks} subtasks for task ${task.id}: ${task.title}`
);
const loadingIndicator = startLoadingIndicator(
`Generating subtasks for task ${task.id}...`
);
let streamingInterval = null;
let responseText = '';
const systemPrompt = `You are an AI assistant helping with task breakdown for software development.
You need to break down a high-level task into ${numSubtasks} specific subtasks that can be implemented one by one.
@@ -585,72 +607,62 @@ Return exactly ${numSubtasks} subtasks with the following JSON structure:
Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use an empty array if there are no dependencies.`;
try {
// Update loading indicator to show streaming progress
// Only create interval if not silent and stdout is a TTY
if (!isSilentMode() && process.stdout.isTTY) {
let dotCount = 0;
const readline = await import('readline');
streamingInterval = setInterval(() => {
readline.cursorTo(process.stdout, 0);
process.stdout.write(
`Generating subtasks for task ${task.id}${'.'.repeat(dotCount)}`
);
dotCount = (dotCount + 1) % 4;
}, 500);
}
// TODO: MOVE THIS TO THE STREAM REQUEST FUNCTION (DRY)
// Use streaming API call
const stream = await anthropic.messages.create({
model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
system: systemPrompt,
messages: [
{
role: 'user',
content: userPrompt
}
],
stream: true
});
// Process the stream
for await (const chunk of stream) {
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
responseText += chunk.delta.text;
const stream = await anthropic.messages.create({
model: model,
max_tokens: maxTokens,
temperature: temperature,
system: systemPrompt,
messages: [
{
role: 'user',
content: userPrompt
}
if (reportProgress) {
await reportProgress({
progress: (responseText.length / CONFIG.maxTokens) * 100
});
}
if (mcpLog) {
mcpLog.info(
`Progress: ${(responseText.length / CONFIG.maxTokens) * 100}%`
);
}
}
],
stream: true
});
if (streamingInterval) clearInterval(streamingInterval);
stopLoadingIndicator(loadingIndicator);
let responseText = '';
let streamingInterval = null;
log('info', `Completed generating subtasks for task ${task.id}`);
return parseSubtasksFromText(
responseText,
nextSubtaskId,
numSubtasks,
task.id
);
} catch (error) {
if (streamingInterval) clearInterval(streamingInterval);
stopLoadingIndicator(loadingIndicator);
throw error;
if (!isSilentMode() && process.stdout.isTTY) {
let dotCount = 0;
const readline = await import('readline');
streamingInterval = setInterval(() => {
readline.cursorTo(process.stdout, 0);
process.stdout.write(
`Generating subtasks for task ${task.id}${'.'.repeat(dotCount)}`
);
dotCount = (dotCount + 1) % 4;
}, 500);
}
for await (const chunk of stream) {
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
responseText += chunk.delta.text;
}
if (reportProgress) {
await reportProgress({
progress: (responseText.length / maxTokens) * 100
});
}
if (mcpLog) {
mcpLog.info(`Progress: ${(responseText.length / maxTokens) * 100}%`);
}
}
if (streamingInterval) clearInterval(streamingInterval);
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
log('info', `Completed generating subtasks for task ${task.id}`);
return parseSubtasksFromText(
responseText,
nextSubtaskId,
numSubtasks,
task.id
);
} catch (error) {
if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
log('error', `Error generating subtasks: ${error.message}`);
throw error;
}

View File

@@ -13,7 +13,7 @@ import inquirer from 'inquirer';
import ora from 'ora';
import Table from 'cli-table3';
import { CONFIG, log, readJSON, writeJSON } from './utils.js';
import { log, readJSON, writeJSON } from './utils.js';
import {
parsePRD,
updateTasks,
@@ -45,16 +45,16 @@ import {
getMainModelId,
getResearchModelId,
getFallbackModelId,
setMainModel,
setResearchModel,
setFallbackModel,
getAvailableModels,
VALID_PROVIDERS,
getMainProvider,
getResearchProvider,
getFallbackProvider,
hasApiKeyForProvider,
getMcpApiKeyStatus
isApiKeySet,
getMcpApiKeyStatus,
getDebugFlag,
getConfig,
writeConfig
} from './config-manager.js';
import {
@@ -399,7 +399,8 @@ function registerCommands(programInstance) {
);
}
if (CONFIG.debug) {
// Use getDebugFlag getter instead of CONFIG.debug
if (getDebugFlag(null)) {
console.error(error);
}
@@ -554,7 +555,8 @@ function registerCommands(programInstance) {
);
}
if (CONFIG.debug) {
// Use getDebugFlag getter instead of CONFIG.debug
if (getDebugFlag(null)) {
console.error(error);
}
@@ -640,8 +642,8 @@ function registerCommands(programInstance) {
.option('-a, --all', 'Expand all tasks')
.option(
'-n, --num <number>',
'Number of subtasks to generate',
CONFIG.defaultSubtasks.toString()
'Number of subtasks to generate (default from config)',
'5' // Set a simple string default here
)
.option(
'--research',
@@ -657,7 +659,11 @@ function registerCommands(programInstance) {
)
.action(async (options) => {
const idArg = options.id;
const numSubtasks = options.num || CONFIG.defaultSubtasks;
// Get the actual default if the user didn't provide --num
const numSubtasks =
options.num === '5'
? getDefaultSubtasks(null)
: parseInt(options.num, 10);
const useResearch = options.research || false;
const additionalContext = options.prompt || '';
const forceFlag = options.force || false;
@@ -917,7 +923,7 @@ function registerCommands(programInstance) {
console.log(chalk.gray('Next: Complete this task or add more tasks'));
} catch (error) {
console.error(chalk.red(`Error adding task: ${error.message}`));
if (error.stack && CONFIG.debug) {
if (error.stack && getDebugFlag(null)) {
console.error(error.stack);
}
process.exit(1);
@@ -1583,13 +1589,13 @@ function registerCommands(programInstance) {
)
.option('--setup', 'Run interactive setup to configure models')
.action(async (options) => {
let modelSetAction = false; // Track if any set action was performed
let configModified = false; // Track if config needs saving
const availableModels = getAvailableModels(); // Get available models once
const currentConfig = getConfig(); // Load current config once
// Helper to find provider for a given model ID
const findProvider = (modelId) => {
const modelInfo = availableModels.find((m) => m.id === modelId);
return modelInfo?.provider;
const findModelData = (modelId) => {
return availableModels.find((m) => m.id === modelId);
};
try {
@@ -1601,27 +1607,27 @@ function registerCommands(programInstance) {
);
process.exit(1);
}
const provider = findProvider(modelId);
if (!provider) {
const modelData = findModelData(modelId);
if (!modelData || !modelData.provider) {
console.error(
chalk.red(
`Error: Model ID "${modelId}" not found in available models.`
`Error: Model ID "${modelId}" not found or invalid in available models.`
)
);
process.exit(1);
}
if (setMainModel(provider, modelId)) {
// Call specific setter
console.log(
chalk.green(
`Main model set to: ${modelId} (Provider: ${provider})`
)
);
modelSetAction = true;
} else {
console.error(chalk.red(`Failed to set main model.`));
process.exit(1);
}
// Update the loaded config object
currentConfig.models.main = {
...currentConfig.models.main, // Keep existing params like maxTokens
provider: modelData.provider,
modelId: modelId
};
console.log(
chalk.blue(
`Preparing to set main model to: ${modelId} (Provider: ${modelData.provider})`
)
);
configModified = true;
}
if (options.setResearch) {
@@ -1632,27 +1638,27 @@ function registerCommands(programInstance) {
);
process.exit(1);
}
const provider = findProvider(modelId);
if (!provider) {
const modelData = findModelData(modelId);
if (!modelData || !modelData.provider) {
console.error(
chalk.red(
`Error: Model ID "${modelId}" not found in available models.`
`Error: Model ID "${modelId}" not found or invalid in available models.`
)
);
process.exit(1);
}
if (setResearchModel(provider, modelId)) {
// Call specific setter
console.log(
chalk.green(
`Research model set to: ${modelId} (Provider: ${provider})`
)
);
modelSetAction = true;
} else {
console.error(chalk.red(`Failed to set research model.`));
process.exit(1);
}
// Update the loaded config object
currentConfig.models.research = {
...currentConfig.models.research, // Keep existing params like maxTokens
provider: modelData.provider,
modelId: modelId
};
console.log(
chalk.blue(
`Preparing to set research model to: ${modelId} (Provider: ${modelData.provider})`
)
);
configModified = true;
}
if (options.setFallback) {
@@ -1663,30 +1669,49 @@ function registerCommands(programInstance) {
);
process.exit(1);
}
const provider = findProvider(modelId);
if (!provider) {
const modelData = findModelData(modelId);
if (!modelData || !modelData.provider) {
console.error(
chalk.red(
`Error: Model ID "${modelId}" not found in available models.`
`Error: Model ID "${modelId}" not found or invalid in available models.`
)
);
process.exit(1);
}
if (setFallbackModel(provider, modelId)) {
// Call specific setter
console.log(
chalk.green(
`Fallback model set to: ${modelId} (Provider: ${provider})`
)
);
modelSetAction = true;
} else {
console.error(chalk.red(`Failed to set fallback model.`));
process.exit(1);
}
// Update the loaded config object
currentConfig.models.fallback = {
...currentConfig.models.fallback, // Keep existing params like maxTokens
provider: modelData.provider,
modelId: modelId
};
console.log(
chalk.blue(
`Preparing to set fallback model to: ${modelId} (Provider: ${modelData.provider})`
)
);
configModified = true;
}
// Handle interactive setup first
// If any config was modified, write it back to the file
if (configModified) {
if (writeConfig(currentConfig)) {
console.log(
chalk.green(
'Configuration successfully updated in .taskmasterconfig'
)
);
} else {
console.error(
chalk.red(
'Error writing updated configuration to .taskmasterconfig'
)
);
process.exit(1);
}
return; // Exit after successful set operation
}
// Handle interactive setup first (Keep existing setup logic)
if (options.setup) {
console.log(chalk.cyan.bold('\nInteractive Model Setup:'));
@@ -1817,8 +1842,8 @@ function registerCommands(programInstance) {
return; // Exit after setup
}
// If no set flags were used and not in setup mode, list the models
if (!modelSetAction && !options.setup) {
// If no set flags were used and not in setup mode, list the models (Keep existing list logic)
if (!configModified && !options.setup) {
// Fetch current settings
const mainProvider = getMainProvider();
const mainModelId = getMainModelId();
@@ -1828,12 +1853,12 @@ function registerCommands(programInstance) {
const fallbackModelId = getFallbackModelId(); // May be undefined
// Check API keys for both CLI (.env) and MCP (mcp.json)
const mainCliKeyOk = hasApiKeyForProvider(mainProvider);
const mainCliKeyOk = isApiKeySet(mainProvider); // <-- Use correct function name
const mainMcpKeyOk = getMcpApiKeyStatus(mainProvider);
const researchCliKeyOk = hasApiKeyForProvider(researchProvider);
const researchCliKeyOk = isApiKeySet(researchProvider); // <-- Use correct function name
const researchMcpKeyOk = getMcpApiKeyStatus(researchProvider);
const fallbackCliKeyOk = fallbackProvider
? hasApiKeyForProvider(fallbackProvider)
? isApiKeySet(fallbackProvider) // <-- Use correct function name
: true; // No key needed if no fallback is set
const fallbackMcpKeyOk = fallbackProvider
? getMcpApiKeyStatus(fallbackProvider)
@@ -2080,7 +2105,7 @@ function registerCommands(programInstance) {
}
} catch (error) {
log(`Error processing models command: ${error.message}`, 'error');
if (error.stack && CONFIG.debug) {
if (error.stack && getDebugFlag(null)) {
log(error.stack, 'debug');
}
process.exit(1);
@@ -2100,7 +2125,7 @@ function setupCLI() {
.name('dev')
.description('AI-driven development task management')
.version(() => {
// Read version directly from package.json
// Read version directly from package.json ONLY
try {
const packageJsonPath = path.join(process.cwd(), 'package.json');
if (fs.existsSync(packageJsonPath)) {
@@ -2110,9 +2135,13 @@ function setupCLI() {
return packageJson.version;
}
} catch (error) {
// Silently fall back to default version
// Silently fall back to 'unknown'
log(
'warn',
'Could not read package.json for version info in .version()'
);
}
return CONFIG.projectVersion; // Default fallback
return 'unknown'; // Default fallback if package.json fails
})
.helpOption('-h, --help', 'Display help')
.addHelpCommand(false) // Disable default help command
@@ -2141,16 +2170,21 @@ function setupCLI() {
* @returns {Promise<{currentVersion: string, latestVersion: string, needsUpdate: boolean}>}
*/
async function checkForUpdate() {
// Get current version from package.json
let currentVersion = CONFIG.projectVersion;
// Get current version from package.json ONLY
let currentVersion = 'unknown'; // Initialize with a default
try {
// Try to get the version from the installed package
const packageJsonPath = path.join(
// Try to get the version from the installed package (if applicable) or current dir
let packageJsonPath = path.join(
process.cwd(),
'node_modules',
'task-master-ai',
'package.json'
);
// Fallback to current directory package.json if not found in node_modules
if (!fs.existsSync(packageJsonPath)) {
packageJsonPath = path.join(process.cwd(), 'package.json');
}
if (fs.existsSync(packageJsonPath)) {
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
currentVersion = packageJson.version;
@@ -2303,7 +2337,7 @@ async function runCLI(argv = process.argv) {
} catch (error) {
console.error(chalk.red(`Error: ${error.message}`));
if (CONFIG.debug) {
if (getDebugFlag(null)) {
console.error(error);
}

View File

@@ -2,6 +2,14 @@ import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import { fileURLToPath } from 'url';
import { ZodError } from 'zod';
import {
log,
readJSON,
writeJSON,
resolveEnvVariable,
findProjectRoot
} from './utils.js';
// Calculate __dirname in ESM
const __filename = fileURLToPath(import.meta.url);
@@ -28,63 +36,49 @@ try {
const CONFIG_FILE_NAME = '.taskmasterconfig';
// Default configuration
const DEFAULT_MAIN_PROVIDER = 'anthropic';
const DEFAULT_MAIN_MODEL_ID = 'claude-3.7-sonnet-20250219';
const DEFAULT_RESEARCH_PROVIDER = 'perplexity';
const DEFAULT_RESEARCH_MODEL_ID = 'sonar-pro';
// Define valid providers dynamically from the loaded MODEL_MAP
const VALID_PROVIDERS = Object.keys(MODEL_MAP);
// Define ONE list of all supported providers
const VALID_PROVIDERS = [
'anthropic',
'openai',
'google',
'perplexity',
'ollama',
'openrouter',
'grok'
];
let projectRoot = null;
function findProjectRoot() {
// Keep this function as is for CLI context
if (projectRoot) return projectRoot;
let currentDir = process.cwd();
while (currentDir !== path.parse(currentDir).root) {
if (fs.existsSync(path.join(currentDir, 'package.json'))) {
projectRoot = currentDir;
return projectRoot;
// Default configuration values (used if .taskmasterconfig is missing or incomplete)
const DEFAULTS = {
models: {
main: {
provider: 'anthropic',
modelId: 'claude-3-7-sonnet-20250219',
maxTokens: 64000,
temperature: 0.2
},
research: {
provider: 'perplexity',
modelId: 'sonar-pro',
maxTokens: 8700,
temperature: 0.1
},
fallback: {
// No default fallback provider/model initially
provider: 'anthropic',
modelId: 'claude-3-5-sonnet',
maxTokens: 64000, // Default parameters if fallback IS configured
temperature: 0.2
}
currentDir = path.dirname(currentDir);
},
global: {
logLevel: 'info',
debug: false,
defaultSubtasks: 5,
defaultPriority: 'medium',
projectName: 'Task Master',
ollamaBaseUrl: 'http://localhost:11434/api'
}
};
// Check root directory as a last resort
if (fs.existsSync(path.join(currentDir, 'package.json'))) {
projectRoot = currentDir;
return projectRoot;
}
// --- Internal Config Loading ---
let loadedConfig = null; // Cache for loaded config
// If still not found, maybe look for other markers or return null
// For now, returning null if package.json isn't found up to the root
projectRoot = null;
return null;
}
function readConfig(explicitRoot = null) {
function _loadAndValidateConfig(explicitRoot = null) {
// Determine the root path to use
const rootToUse = explicitRoot || findProjectRoot();
const defaults = {
models: {
main: { provider: DEFAULT_MAIN_PROVIDER, modelId: DEFAULT_MAIN_MODEL_ID },
research: {
provider: DEFAULT_RESEARCH_PROVIDER,
modelId: DEFAULT_RESEARCH_MODEL_ID
}
}
};
const defaults = DEFAULTS; // Use the defined defaults
if (!rootToUse) {
console.warn(
@@ -101,75 +95,60 @@ function readConfig(explicitRoot = null) {
const rawData = fs.readFileSync(configPath, 'utf-8');
const parsedConfig = JSON.parse(rawData);
// Deep merge defaults to ensure structure and handle partial configs
// Deep merge with defaults
const config = {
models: {
main: {
provider:
parsedConfig?.models?.main?.provider ??
defaults.models.main.provider,
modelId:
parsedConfig?.models?.main?.modelId ??
defaults.models.main.modelId
},
main: { ...defaults.models.main, ...parsedConfig?.models?.main },
research: {
provider:
parsedConfig?.models?.research?.provider ??
defaults.models.research.provider,
modelId:
parsedConfig?.models?.research?.modelId ??
defaults.models.research.modelId
...defaults.models.research,
...parsedConfig?.models?.research
},
// Add merge logic for the fallback model
fallback: {
provider: parsedConfig?.models?.fallback?.provider,
modelId: parsedConfig?.models?.fallback?.modelId
}
}
// Fallback needs careful merging - only merge if provider/model exist
fallback:
parsedConfig?.models?.fallback?.provider &&
parsedConfig?.models?.fallback?.modelId
? { ...defaults.models.fallback, ...parsedConfig.models.fallback }
: { ...defaults.models.fallback } // Use default params even if provider/model missing
},
global: { ...defaults.global, ...parsedConfig?.global }
};
// Validate loaded providers (main, research, and fallback if it exists)
// --- Validation ---
// Validate main provider/model
if (!validateProvider(config.models.main.provider)) {
console.warn(
chalk.yellow(
`Warning: Invalid main provider "${config.models.main.provider}" in ${CONFIG_FILE_NAME}. Falling back to default.`
)
);
config.models.main = {
provider: defaults.models.main.provider,
modelId: defaults.models.main.modelId
};
config.models.main = { ...defaults.models.main };
}
// Optional: Add warning for model combination if desired, but don't block
// else if (!validateProviderModelCombination(config.models.main.provider, config.models.main.modelId)) { ... }
// Optional: Add warning for model combination if desired
// Validate research provider/model
if (!validateProvider(config.models.research.provider)) {
console.warn(
chalk.yellow(
`Warning: Invalid research provider "${config.models.research.provider}" in ${CONFIG_FILE_NAME}. Falling back to default.`
)
);
config.models.research = {
provider: defaults.models.research.provider,
modelId: defaults.models.research.modelId
};
config.models.research = { ...defaults.models.research };
}
// Optional: Add warning for model combination if desired, but don't block
// else if (!validateProviderModelCombination(config.models.research.provider, config.models.research.modelId)) { ... }
// Optional: Add warning for model combination if desired
// Add validation for fallback provider if it exists
// Validate fallback provider if it exists
if (
config.models.fallback &&
config.models.fallback.provider &&
config.models.fallback?.provider &&
!validateProvider(config.models.fallback.provider)
) {
console.warn(
chalk.yellow(
`Warning: Invalid fallback provider "${config.models.fallback.provider}" in ${CONFIG_FILE_NAME}. Fallback model will be ignored.`
`Warning: Invalid fallback provider "${config.models.fallback.provider}" in ${CONFIG_FILE_NAME}. Fallback model configuration will be ignored.`
)
);
// Unlike main/research, we don't set a default fallback, just ignore it
delete config.models.fallback;
// Clear invalid fallback provider/model, but keep default params if needed elsewhere
config.models.fallback.provider = undefined;
config.models.fallback.modelId = undefined;
}
return config;
@@ -182,10 +161,28 @@ function readConfig(explicitRoot = null) {
return defaults;
}
} else {
// Config file doesn't exist, use defaults
return defaults;
}
}
/**
 * Gets the current configuration, loading it if necessary.
 * Results are cached at module level; pass `forceReload` to bypass the cache.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @param {boolean} forceReload - Force reloading the config file.
 * @returns {object} The loaded configuration object.
 */
function getConfig(explicitRoot = null, forceReload = false) {
	// One-off lookup against an explicit root: read it directly and do not
	// disturb the shared cache. (Previously this path could load the file
	// twice — once to populate the cache, then again for the return value.)
	if (explicitRoot && !forceReload) {
		return _loadAndValidateConfig(explicitRoot);
	}
	if (!loadedConfig || forceReload) {
		loadedConfig = _loadAndValidateConfig(explicitRoot);
	}
	return loadedConfig;
}
/**
* Validates if a provider name is in the list of supported providers.
* @param {string} providerName The name of the provider.
@@ -215,402 +212,134 @@ function validateProviderModelCombination(providerName, modelId) {
);
}
/**
* Gets the currently configured main AI provider.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {string} The name of the main provider.
*/
// --- Role-Specific Getters ---
/**
 * Looks up the model settings (provider, modelId, maxTokens, temperature)
 * for a given role ('main', 'research', or 'fallback').
 * Falls back to the shipped defaults for that role if the config has no entry.
 * @param {string} role - The model role to look up.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {object} The role's model configuration (possibly the defaults).
 */
function getModelConfigForRole(role, explicitRoot = null) {
	const roleConfig = getConfig(explicitRoot)?.models?.[role];
	if (roleConfig) {
		return roleConfig;
	}
	log('warn', `No model configuration found for role: ${role}`);
	return DEFAULTS.models[role] || {};
}
function getMainProvider(explicitRoot = null) {
const config = readConfig(explicitRoot);
return config.models.main.provider;
return getModelConfigForRole('main', explicitRoot).provider;
}
/**
* Gets the currently configured main AI model ID.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {string} The ID of the main model.
*/
function getMainModelId(explicitRoot = null) {
const config = readConfig(explicitRoot);
return config.models.main.modelId;
return getModelConfigForRole('main', explicitRoot).modelId;
}
/**
 * Gets the configured maxTokens budget for the main model role.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {number} Max token budget for the main role.
 */
function getMainMaxTokens(explicitRoot = null) {
	const { maxTokens } = getModelConfigForRole('main', explicitRoot);
	return maxTokens;
}
/**
 * Gets the configured sampling temperature for the main model role.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {number} Temperature for the main role.
 */
function getMainTemperature(explicitRoot = null) {
	const { temperature } = getModelConfigForRole('main', explicitRoot);
	return temperature;
}
/**
* Gets the currently configured research AI provider.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {string} The name of the research provider.
*/
function getResearchProvider(explicitRoot = null) {
const config = readConfig(explicitRoot);
return config.models.research.provider;
return getModelConfigForRole('research', explicitRoot).provider;
}
/**
* Gets the currently configured research AI model ID.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {string} The ID of the research model.
*/
function getResearchModelId(explicitRoot = null) {
const config = readConfig(explicitRoot);
return config.models.research.modelId;
return getModelConfigForRole('research', explicitRoot).modelId;
}
/**
 * Gets the configured maxTokens budget for the research model role.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {number} Max token budget for the research role.
 */
function getResearchMaxTokens(explicitRoot = null) {
	const { maxTokens } = getModelConfigForRole('research', explicitRoot);
	return maxTokens;
}
/**
 * Gets the configured sampling temperature for the research model role.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {number} Temperature for the research role.
 */
function getResearchTemperature(explicitRoot = null) {
	const { temperature } = getModelConfigForRole('research', explicitRoot);
	return temperature;
}
/**
* Gets the currently configured fallback AI provider.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {string|undefined} The name of the fallback provider, or undefined if not set.
*/
function getFallbackProvider(explicitRoot = null) {
const config = readConfig(explicitRoot);
return config.models?.fallback?.provider;
// Specifically check if provider is set, as fallback is optional
return getModelConfigForRole('fallback', explicitRoot).provider || undefined;
}
/**
* Gets the currently configured fallback AI model ID.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {string|undefined} The ID of the fallback model, or undefined if not set.
*/
function getFallbackModelId(explicitRoot = null) {
const config = readConfig(explicitRoot);
return config.models?.fallback?.modelId;
// Specifically check if modelId is set
return getModelConfigForRole('fallback', explicitRoot).modelId || undefined;
}
/**
 * Gets the configured maxTokens budget for the fallback model role.
 * Returned even when no fallback provider/model is set, since the default
 * parameters may still be needed generically.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {number} Max token budget for the fallback role.
 */
function getFallbackMaxTokens(explicitRoot = null) {
	const { maxTokens } = getModelConfigForRole('fallback', explicitRoot);
	return maxTokens;
}
/**
 * Gets the configured sampling temperature for the fallback model role.
 * Returned even when no fallback provider/model is set.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {number} Temperature for the fallback role.
 */
function getFallbackTemperature(explicitRoot = null) {
	const { temperature } = getModelConfigForRole('fallback', explicitRoot);
	return temperature;
}
// --- Global Settings Getters ---
/**
 * Gets the `global` section of the configuration (log level, debug flag,
 * default subtasks/priority, project name, ollama URL).
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {object} The global settings, or the shipped defaults if absent.
 */
function getGlobalConfig(explicitRoot = null) {
	// Destructure-rename avoids shadowing Node's `global` identifier.
	const { global: globalSettings } = getConfig(explicitRoot) ?? {};
	return globalSettings || DEFAULTS.global;
}
/**
 * Gets the configured log level.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {string} The log level (e.g. 'info').
 */
function getLogLevel(explicitRoot = null) {
	const { logLevel } = getGlobalConfig(explicitRoot);
	return logLevel;
}
/**
 * Gets the debug flag from the global config.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {boolean} True only when the config explicitly sets debug to true.
 */
function getDebugFlag(explicitRoot = null) {
	// Strict comparison guarantees a boolean even if the config stores junk.
	const { debug } = getGlobalConfig(explicitRoot);
	return debug === true;
}
/**
 * Gets the default number of subtasks from the global config.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {number} Default subtask count as an integer.
 */
function getDefaultSubtasks(explicitRoot = null) {
	// Coerce to integer; config files may store this as a string.
	const parsed = Number.parseInt(
		getGlobalConfig(explicitRoot).defaultSubtasks,
		10
	);
	// Guard against NaN from a malformed config value so callers never
	// receive NaN (the previous implementation returned parseInt directly).
	return Number.isNaN(parsed) ? DEFAULTS.global.defaultSubtasks : parsed;
}
/**
 * Gets the default task priority from the global config.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {string} The default priority (e.g. 'medium').
 */
function getDefaultPriority(explicitRoot = null) {
	const { defaultPriority } = getGlobalConfig(explicitRoot);
	return defaultPriority;
}
/**
 * Gets the configured project name.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {string} The project name.
 */
function getProjectName(explicitRoot = null) {
	const { projectName } = getGlobalConfig(explicitRoot);
	return projectName;
}
/**
 * Gets the configured Ollama API base URL.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {string} The Ollama base URL.
 */
function getOllamaBaseUrl(explicitRoot = null) {
	const { ollamaBaseUrl } = getGlobalConfig(explicitRoot);
	return ollamaBaseUrl;
}
/**
* Sets the main AI model (provider and modelId) in the configuration file.
* @param {string} providerName The name of the provider to set.
* @param {string} modelId The ID of the model to set.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {boolean} True if successful, false otherwise.
* Checks if the API key for a given provider is set in the environment.
* Checks process.env first, then session.env if session is provided.
* @param {string} providerName - The name of the provider (e.g., 'openai', 'anthropic').
* @param {object|null} [session=null] - The MCP session object (optional).
* @returns {boolean} True if the API key is set, false otherwise.
*/
function setMainModel(providerName, modelId, explicitRoot = null) {
// --- 1. Validate Provider First ---
if (!validateProvider(providerName)) {
console.error(
chalk.red(`Error: "${providerName}" is not a valid provider.`)
);
console.log(
chalk.yellow(`Available providers: ${VALID_PROVIDERS.join(', ')}`)
);
function isApiKeySet(providerName, session = null) {
// Define the expected environment variable name for each provider
const keyMap = {
openai: 'OPENAI_API_KEY',
anthropic: 'ANTHROPIC_API_KEY',
google: 'GOOGLE_API_KEY',
perplexity: 'PERPLEXITY_API_KEY',
grok: 'GROK_API_KEY', // Assuming GROK_API_KEY based on env.example
mistral: 'MISTRAL_API_KEY',
azure: 'AZURE_OPENAI_API_KEY', // Azure needs endpoint too, but key presence is a start
openrouter: 'OPENROUTER_API_KEY',
xai: 'XAI_API_KEY'
// Add other providers as needed
};
const providerKey = providerName?.toLowerCase();
if (!providerKey || !keyMap[providerKey]) {
log('warn', `Unknown provider name: ${providerName} in isApiKeySet check.`);
return false;
}
// --- 2. Validate Role Second ---
const allModels = getAvailableModels(); // Get all models to check roles
const modelData = allModels.find(
(m) => m.id === modelId && m.provider === providerName
);
if (
!modelData ||
!modelData.allowed_roles ||
!modelData.allowed_roles.includes('main')
) {
console.error(
chalk.red(`Error: Model "${modelId}" is not allowed for the 'main' role.`)
);
// Try to suggest valid models for the role
const allowedMainModels = allModels
.filter((m) => m.allowed_roles?.includes('main'))
.map((m) => ` - ${m.provider} / ${m.id}`)
.join('\n');
if (allowedMainModels) {
console.log(
chalk.yellow('\nAllowed models for main role:\n' + allowedMainModels)
);
}
return false;
}
// --- 3. Validate Model Combination (Optional Warning) ---
if (!validateProviderModelCombination(providerName, modelId)) {
console.warn(
chalk.yellow(
`Warning: Model "${modelId}" is not in the known list for provider "${providerName}". Ensure it is valid.`
)
);
}
// --- Proceed with setting ---
const config = readConfig(explicitRoot);
config.models.main = { provider: providerName, modelId: modelId };
// Pass explicitRoot down
if (writeConfig(config, explicitRoot)) {
console.log(
chalk.green(`Main AI model set to: ${providerName} / ${modelId}`)
);
return true;
} else {
return false;
}
}
/**
 * Sets the research AI model (provider and modelId) in the configuration file.
 *
 * Performs three validation steps before persisting:
 *  1. the provider must be in VALID_PROVIDERS;
 *  2. the model must list 'research' in its allowed_roles;
 *  3. an unknown provider/model combination produces a warning only.
 * Invalid input leaves the configuration file untouched and returns false.
 *
 * @param {string} providerName The name of the provider to set.
 * @param {string} modelId The ID of the model to set.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {boolean} True if successful, false otherwise.
 */
function setResearchModel(providerName, modelId, explicitRoot = null) {
	// --- 1. Validate Provider First ---
	if (!validateProvider(providerName)) {
		console.error(
			chalk.red(`Error: "${providerName}" is not a valid provider.`)
		);
		console.log(
			chalk.yellow(`Available providers: ${VALID_PROVIDERS.join(', ')}`)
		);
		return false;
	}
	// --- 2. Validate Role Second ---
	const allModels = getAvailableModels(); // Get all models to check roles
	const modelData = allModels.find(
		(m) => m.id === modelId && m.provider === providerName
	);
	// Reject models that do not declare the 'research' role.
	if (
		!modelData ||
		!modelData.allowed_roles ||
		!modelData.allowed_roles.includes('research')
	) {
		console.error(
			chalk.red(
				`Error: Model "${modelId}" is not allowed for the 'research' role.`
			)
		);
		// Try to suggest valid models for the role
		const allowedResearchModels = allModels
			.filter((m) => m.allowed_roles?.includes('research'))
			.map((m) => ` - ${m.provider} / ${m.id}`)
			.join('\n');
		if (allowedResearchModels) {
			console.log(
				chalk.yellow(
					'\nAllowed models for research role:\n' + allowedResearchModels
				)
			);
		}
		return false;
	}
	// --- 3. Validate Model Combination (Optional Warning) ---
	// Unknown combinations warn but do not block, to allow new model IDs.
	if (!validateProviderModelCombination(providerName, modelId)) {
		console.warn(
			chalk.yellow(
				`Warning: Model "${modelId}" is not in the known list for provider "${providerName}". Ensure it is valid.`
			)
		);
	}
	// --- 4. Specific Research Warning (Optional) ---
	// Non-blocking advisory: some providers/models are poor fits for research.
	if (
		providerName === 'anthropic' ||
		(providerName === 'openai' && modelId.includes('3.5'))
	) {
		console.warn(
			chalk.yellow(
				`Warning: Provider "${providerName}" with model "${modelId}" may not be ideal for research tasks. Perplexity or Grok recommended.`
			)
		);
	}
	// --- Proceed with setting ---
	const config = readConfig(explicitRoot);
	config.models.research = { provider: providerName, modelId: modelId };
	// Pass explicitRoot down
	if (writeConfig(config, explicitRoot)) {
		console.log(
			chalk.green(`Research AI model set to: ${providerName} / ${modelId}`)
		);
		return true;
	} else {
		return false;
	}
}
/**
 * Sets the fallback AI model (provider and modelId) in the configuration file.
 * Validates the provider name and the model's eligibility for the 'fallback'
 * role before persisting; invalid input leaves the configuration untouched.
 * @param {string} providerName The name of the provider to set.
 * @param {string} modelId The ID of the model to set.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {boolean} True if successful, false otherwise.
 */
function setFallbackModel(providerName, modelId, explicitRoot = null) {
	// --- 1. Validate Provider First ---
	if (!validateProvider(providerName)) {
		console.error(
			chalk.red(`Error: "${providerName}" is not a valid provider.`)
		);
		console.log(
			chalk.yellow(`Available providers: ${VALID_PROVIDERS.join(', ')}`)
		);
		return false;
	}
	// --- 2. Validate Role Second ---
	const allModels = getAvailableModels(); // Get all models to check roles
	const modelData = allModels.find(
		(m) => m.id === modelId && m.provider === providerName
	);
	if (
		!modelData ||
		!modelData.allowed_roles ||
		!modelData.allowed_roles.includes('fallback')
	) {
		console.error(
			chalk.red(
				`Error: Model "${modelId}" is not allowed for the 'fallback' role.`
			)
		);
		// Try to suggest valid models for the role
		const allowedFallbackModels = allModels
			.filter((m) => m.allowed_roles?.includes('fallback'))
			.map((m) => ` - ${m.provider} / ${m.id}`)
			.join('\n');
		if (allowedFallbackModels) {
			console.log(
				chalk.yellow(
					'\nAllowed models for fallback role:\n' + allowedFallbackModels
				)
			);
		}
		return false;
	}
	// --- 3. Validate Model Combination (Optional Warning) ---
	// Unknown combinations warn but do not block, to allow new model IDs.
	if (!validateProviderModelCombination(providerName, modelId)) {
		console.warn(
			chalk.yellow(
				`Warning: Model "${modelId}" is not in the known list for provider "${providerName}". Ensure it is valid.`
			)
		);
	}
	// --- Proceed with setting ---
	const config = readConfig(explicitRoot);
	if (!config.models) {
		config.models = {}; // Ensure models object exists
	}
	// Unconditionally replace the fallback entry (the old empty-object init
	// before this assignment was dead code and has been removed).
	config.models.fallback = { provider: providerName, modelId: modelId };
	// Log success on write, mirroring setMainModel/setResearchModel.
	if (writeConfig(config, explicitRoot)) {
		console.log(
			chalk.green(`Fallback AI model set to: ${providerName} / ${modelId}`)
		);
		return true;
	}
	return false;
}
/**
 * Gets a list of available models based on the MODEL_MAP.
 * Providers with no enumerated models (e.g. ollama) produce a single
 * placeholder entry instead.
 * @returns {Array<{id: string, name: string, provider: string, swe_score: number|null, cost_per_1m_tokens: {input: number|null, output: number|null}|null, allowed_roles: string[]}>}
 */
function getAvailableModels() {
	// Hand-curated display names for models whose auto-generated name is poor.
	const NAME_OVERRIDES = {
		'claude-3.5-sonnet-20240620': 'Claude 3.5 Sonnet',
		'claude-3-7-sonnet-20250219': 'Claude 3.7 Sonnet',
		'gpt-4o': 'GPT-4o',
		'gpt-4-turbo': 'GPT-4 Turbo',
		'sonar-pro': 'Perplexity Sonar Pro',
		'sonar-mini': 'Perplexity Sonar Mini'
	};
	return Object.entries(MODEL_MAP).flatMap(([provider, models]) => {
		if (models.length === 0) {
			// Placeholder entry for providers with an empty model list.
			return [
				{
					id: `[${provider}-any]`,
					name: `Any (${provider})`,
					provider: provider
				}
			];
		}
		return models.map((modelObj) => {
			// Auto-generate a display name by title-casing the dash-separated id.
			const generatedName = modelObj.id
				.split('-')
				.map((part) => part.charAt(0).toUpperCase() + part.slice(1))
				.join(' ');
			return {
				id: modelObj.id,
				name: NAME_OVERRIDES[modelObj.id] ?? generatedName,
				provider: provider,
				swe_score: modelObj.swe_score,
				cost_per_1m_tokens: modelObj.cost_per_1m_tokens,
				allowed_roles: modelObj.allowed_roles || ['main', 'fallback']
			};
		});
	});
}
/**
* Writes the configuration object to the file.
* @param {Object} config The configuration object to write.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {boolean} True if successful, false otherwise.
*/
function writeConfig(config, explicitRoot = null) {
const rootPath = explicitRoot || findProjectRoot();
if (!rootPath) {
console.error(
chalk.red(
'Error: Could not determine project root. Configuration not saved.'
)
);
return false;
}
// Ensure we don't double-join if explicitRoot already contains the filename
const configPath =
path.basename(rootPath) === CONFIG_FILE_NAME
? rootPath
: path.join(rootPath, CONFIG_FILE_NAME);
try {
fs.writeFileSync(configPath, JSON.stringify(config, null, 2));
return true;
} catch (error) {
console.error(
chalk.red(
`Error writing configuration to ${configPath}: ${error.message}`
)
);
return false;
}
}
/**
* Checks if the required API key environment variable is set for a given provider.
* @param {string} providerName The name of the provider.
* @returns {boolean} True if the API key environment variable exists and is non-empty, false otherwise.
*/
function hasApiKeyForProvider(providerName) {
switch (providerName) {
case 'anthropic':
return !!process.env.ANTHROPIC_API_KEY;
case 'openai':
case 'openrouter': // OpenRouter uses OpenAI-compatible key
return !!process.env.OPENAI_API_KEY;
case 'google':
return !!process.env.GOOGLE_API_KEY;
case 'perplexity':
return !!process.env.PERPLEXITY_API_KEY;
case 'grok':
case 'xai': // Added alias for Grok
return !!process.env.GROK_API_KEY;
case 'ollama':
return true; // Ollama runs locally, no cloud API key needed
default:
return false; // Unknown provider cannot have a key checked
}
const envVarName = keyMap[providerKey];
// Use resolveEnvVariable to check both process.env and session.env
return !!resolveEnvVariable(envVarName, session);
}
/**
@@ -685,24 +414,125 @@ function getMcpApiKeyStatus(providerName) {
}
}
/**
* Gets a list of available models based on the MODEL_MAP.
* @returns {Array<{id: string, name: string, provider: string, swe_score: number|null, cost_per_1m_tokens: {input: number|null, output: number|null}|null, allowed_roles: string[]}>}
*/
function getAvailableModels() {
const available = [];
for (const [provider, models] of Object.entries(MODEL_MAP)) {
if (models.length > 0) {
models.forEach((modelObj) => {
// Basic name generation - can be improved
const modelId = modelObj.id;
const sweScore = modelObj.swe_score;
const cost = modelObj.cost_per_1m_tokens;
const allowedRoles = modelObj.allowed_roles || ['main', 'fallback'];
const nameParts = modelId
.split('-')
.map((p) => p.charAt(0).toUpperCase() + p.slice(1));
// Handle specific known names better if needed
let name = nameParts.join(' ');
if (modelId === 'claude-3.5-sonnet-20240620')
name = 'Claude 3.5 Sonnet';
if (modelId === 'claude-3-7-sonnet-20250219')
name = 'Claude 3.7 Sonnet';
if (modelId === 'gpt-4o') name = 'GPT-4o';
if (modelId === 'gpt-4-turbo') name = 'GPT-4 Turbo';
if (modelId === 'sonar-pro') name = 'Perplexity Sonar Pro';
if (modelId === 'sonar-mini') name = 'Perplexity Sonar Mini';
available.push({
id: modelId,
name: name,
provider: provider,
swe_score: sweScore,
cost_per_1m_tokens: cost,
allowed_roles: allowedRoles
});
});
} else {
// For providers with empty lists (like ollama), maybe add a placeholder or skip
available.push({
id: `[${provider}-any]`,
name: `Any (${provider})`,
provider: provider
});
}
}
return available;
}
/**
 * Writes the configuration object to the file.
 * Serializes `config` as pretty-printed JSON to the .taskmasterconfig file
 * under the resolved project root, and refreshes the module-level cache on a
 * successful write so subsequent getConfig() calls see the new values.
 * @param {Object} config The configuration object to write.
 * @param {string|null} explicitRoot - Optional explicit path to the project root.
 * @returns {boolean} True if successful, false otherwise.
 */
function writeConfig(config, explicitRoot = null) {
	const rootPath = explicitRoot || findProjectRoot();
	// Without a resolvable root there is nowhere safe to write; bail out.
	if (!rootPath) {
		console.error(
			chalk.red(
				'Error: Could not determine project root. Configuration not saved.'
			)
		);
		return false;
	}
	// If the caller already passed the config file path itself (basename
	// matches), use it as-is; otherwise append the file name to the root dir.
	const configPath =
		path.basename(rootPath) === CONFIG_FILE_NAME
			? rootPath
			: path.join(rootPath, CONFIG_FILE_NAME);
	try {
		fs.writeFileSync(configPath, JSON.stringify(config, null, 2));
		loadedConfig = config; // Update the cache after successful write
		return true;
	} catch (error) {
		// Report but do not throw — callers branch on the boolean result.
		console.error(
			chalk.red(
				`Error writing configuration to ${configPath}: ${error.message}`
			)
		);
		return false;
	}
}
export {
// Not exporting findProjectRoot as it's internal for CLI context now
readConfig, // Keep exporting if direct access is needed elsewhere
writeConfig, // Keep exporting if direct access is needed elsewhere
// Core config access
getConfig, // Might still be useful for getting the whole object
writeConfig,
// Validation
validateProvider,
validateProviderModelCombination,
getMainProvider,
getMainModelId,
getResearchProvider,
getResearchModelId,
getFallbackProvider,
getFallbackModelId,
setMainModel,
setResearchModel,
setFallbackModel,
VALID_PROVIDERS,
MODEL_MAP,
getAvailableModels,
hasApiKeyForProvider,
// Role-specific getters
getMainProvider,
getMainModelId,
getMainMaxTokens,
getMainTemperature,
getResearchProvider,
getResearchModelId,
getResearchMaxTokens,
getResearchTemperature,
getFallbackProvider,
getFallbackModelId,
getFallbackMaxTokens,
getFallbackTemperature,
// Global setting getters
getLogLevel,
getDebugFlag,
getDefaultSubtasks,
getDefaultPriority,
getProjectName,
getOllamaBaseUrl,
// API Key Checkers (still relevant)
isApiKeySet,
getMcpApiKeyStatus
};

View File

@@ -14,7 +14,6 @@ import ora from 'ora';
import inquirer from 'inquirer';
import {
CONFIG,
log,
readJSON,
writeJSON,
@@ -86,6 +85,14 @@ try {
log('warn', 'Research-backed features will not be available');
}
// Import necessary config getters
import {
getDebugFlag,
getDefaultSubtasks,
getDefaultPriority
// Add other getters here as needed later
} from './config-manager.js';
/**
* Parse a PRD file and generate tasks
* @param {string} prdPath - Path to the PRD file
@@ -196,7 +203,8 @@ async function parsePRD(
if (outputFormat === 'text') {
console.error(chalk.red(`Error: ${error.message}`));
if (CONFIG.debug) {
if (getDebugFlag()) {
// Use getter
console.error(error);
}
@@ -675,7 +683,8 @@ Return only the updated task as a valid JSON object.`
console.log(' 2. Ensure PERPLEXITY_API_KEY is set for fallback.');
}
if (CONFIG.debug) {
if (getDebugFlag()) {
// Use getter
console.error(error);
}
@@ -1337,7 +1346,8 @@ Return only the updated task as a valid JSON object.`
console.log(' 2. Use a valid task ID with the --id parameter');
}
if (CONFIG.debug) {
if (getDebugFlag()) {
// Use getter
console.error(error);
}
} else {
@@ -1484,7 +1494,8 @@ function generateTaskFiles(tasksPath, outputDir, options = {}) {
if (!options?.mcpLog) {
console.error(chalk.red(`Error generating task files: ${error.message}`));
if (CONFIG.debug) {
if (getDebugFlag()) {
// Use getter
console.error(error);
}
@@ -1584,7 +1595,8 @@ async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {
if (!options?.mcpLog) {
console.error(chalk.red(`Error: ${error.message}`));
if (CONFIG.debug) {
if (getDebugFlag()) {
// Use getter
console.error(error);
}
@@ -2477,7 +2489,7 @@ async function expandTask(
}
// Determine the number of subtasks to generate
let subtaskCount = parseInt(numSubtasks, 10) || CONFIG.defaultSubtasks;
let subtaskCount = parseInt(numSubtasks, 10) || getDefaultSubtasks(); // Use getter
// Check if we have a complexity analysis for this task
let taskAnalysis = null;
@@ -2504,7 +2516,7 @@ async function expandTask(
// Use recommended number of subtasks if available
if (
taskAnalysis.recommendedSubtasks &&
subtaskCount === CONFIG.defaultSubtasks
subtaskCount === getDefaultSubtasks() // Use getter
) {
subtaskCount = taskAnalysis.recommendedSubtasks;
report(`Using recommended number of subtasks: ${subtaskCount}`);
@@ -2672,7 +2684,7 @@ Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use
*/
async function expandAllTasks(
tasksPath,
numSubtasks = CONFIG.defaultSubtasks,
numSubtasks = getDefaultSubtasks(), // Use getter
useResearch = false,
additionalContext = '',
forceFlag = false,
@@ -2698,7 +2710,7 @@ async function expandAllTasks(
if (typeof numSubtasks === 'string') {
numSubtasks = parseInt(numSubtasks, 10);
if (isNaN(numSubtasks)) {
numSubtasks = CONFIG.defaultSubtasks;
numSubtasks = getDefaultSubtasks(); // Use getter
}
}
@@ -3127,7 +3139,7 @@ async function addTask(
tasksPath,
prompt,
dependencies = [],
priority = 'medium',
priority = getDefaultPriority(), // Use getter
{ reportProgress, mcpLog, session } = {},
outputFormat = 'text',
customEnv = null,
@@ -4415,7 +4427,8 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
console.error(
chalk.red(`Error parsing complexity analysis: ${error.message}`)
);
if (CONFIG.debug) {
if (getDebugFlag()) {
// Use getter
console.debug(
chalk.gray(`Raw response: ${fullResponse.substring(0, 500)}...`)
);
@@ -4460,7 +4473,8 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
);
}
if (CONFIG.debug) {
if (getDebugFlag()) {
// Use getter
console.error(error);
}
@@ -5382,7 +5396,8 @@ Provide concrete examples, code snippets, or implementation details when relevan
);
}
if (CONFIG.debug) {
if (getDebugFlag()) {
// Use getter
console.error(error);
}
} else {

View File

@@ -1,32 +0,0 @@
/**
 * Updates a single subtask based on a user-supplied prompt.
 * Validates inputs and loads the tasks file; the core update logic is elided
 * here ("rest of the function"), so the full behavior cannot be documented
 * from this excerpt.
 * @param {string} tasksPath - Path to the tasks JSON file.
 * @param {string} subtaskId - Subtask ID in "parentId.subtaskId" format.
 * @param {string} prompt - Context describing the desired update.
 * @param {boolean} [useResearch=false] - Presumably toggles the research
 *   model for the update — TODO confirm against the elided body.
 * @throws {Error} On invalid subtask ID, empty prompt, or missing tasks file.
 */
async function updateSubtaskById(tasksPath, subtaskId, prompt, useResearch = false) {
	let loadingIndicator = null;
	try {
		log('info', `Updating subtask ${subtaskId} with prompt: "${prompt}"`);
		// Validate subtask ID format
		if (!subtaskId || typeof subtaskId !== 'string' || !subtaskId.includes('.')) {
			throw new Error(`Invalid subtask ID format: ${subtaskId}. Subtask ID must be in format "parentId.subtaskId"`);
		}
		// Validate prompt
		if (!prompt || typeof prompt !== 'string' || prompt.trim() === '') {
			throw new Error('Prompt cannot be empty. Please provide context for the subtask update.');
		}
		// Prepare for fallback handling
		// NOTE(review): flag appears unused in this excerpt — presumably set
		// when the primary model is overloaded; confirm in the full body.
		let claudeOverloaded = false;
		// Validate tasks file exists
		if (!fs.existsSync(tasksPath)) {
			throw new Error(`Tasks file not found at path: ${tasksPath}`);
		}
		// Read the tasks file
		const data = readJSON(tasksPath);
		// ... rest of the function
	} catch (error) {
		// Handle errors
		// Log and rethrow so the caller can decide how to surface the failure.
		console.error(`Error updating subtask: ${error.message}`);
		throw error;
	}
}

View File

@@ -10,7 +10,6 @@ import ora from 'ora';
import Table from 'cli-table3';
import gradient from 'gradient-string';
import {
CONFIG,
log,
findTaskById,
readJSON,
@@ -20,6 +19,7 @@ import {
import path from 'path';
import fs from 'fs';
import { findNextTask, analyzeTaskComplexity } from './task-manager.js';
import { getProjectName, getDefaultSubtasks } from './config-manager.js';
// Create a color gradient for the banner
const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);
@@ -44,7 +44,7 @@ function displayBanner() {
);
// Read version directly from package.json
let version = CONFIG.projectVersion; // Default fallback
let version = 'unknown'; // Initialize with a default
try {
const packageJsonPath = path.join(process.cwd(), 'package.json');
if (fs.existsSync(packageJsonPath)) {
@@ -53,12 +53,13 @@ function displayBanner() {
}
} catch (error) {
// Silently fall back to default version
log('warn', 'Could not read package.json for version info.');
}
console.log(
boxen(
chalk.white(
`${chalk.bold('Version:')} ${version} ${chalk.bold('Project:')} ${CONFIG.projectName}`
`${chalk.bold('Version:')} ${version} ${chalk.bold('Project:')} ${getProjectName(null)}`
),
{
padding: 1,
@@ -1652,6 +1653,45 @@ async function displayComplexityReport(reportPath) {
);
}
/**
 * Generate the LLM prompt used for task complexity analysis.
 * Lists every task (id, title, description, details, dependencies, priority),
 * separated by '---' dividers, then asks for a JSON array with one complexity
 * assessment per task.
 * @param {Object} tasksData - Tasks data object containing a `tasks` array.
 * @returns {string} Generated prompt text.
 */
function generateComplexityAnalysisPrompt(tasksData) {
// Configured default subtask count; drives the recommended range interpolated
// below (clamped to a minimum of 3 and a maximum of 8).
const defaultSubtasks = getDefaultSubtasks(null); // Use the getter
// NOTE: the template's exact text and whitespace are part of the prompt sent
// to the model — do not reformat the literal.
return `Analyze the complexity of the following tasks and provide recommendations for subtask breakdown:
${tasksData.tasks
.map(
(task) => `
Task ID: ${task.id}
Title: ${task.title}
Description: ${task.description}
Details: ${task.details}
Dependencies: ${JSON.stringify(task.dependencies || [])}
Priority: ${task.priority || 'medium'}
`
)
.join('\n---\n')}
Analyze each task and return a JSON array with the following structure for each task:
[
{
"taskId": number,
"taskTitle": string,
"complexityScore": number (1-10),
"recommendedSubtasks": number (${Math.max(3, defaultSubtasks - 1)}-${Math.min(8, defaultSubtasks + 2)}),
"expansionPrompt": string (a specific prompt for generating good subtasks),
"reasoning": string (brief explanation of your assessment)
},
...
]
IMPORTANT: Make sure to include an analysis for EVERY task listed above, with the correct taskId matching each task's ID.
`;
}
/**
* Confirm overwriting existing tasks.json file
* @param {string} tasksPath - Path to the tasks.json file
@@ -1706,5 +1746,6 @@ export {
displayNextTask,
displayTaskById,
displayComplexityReport,
generateComplexityAnalysisPrompt,
confirmTaskOverwrite
};

View File

@@ -6,22 +6,61 @@
import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import { ZodError } from 'zod';
// Import specific config getters needed here
import { getLogLevel, getDebugFlag } from './config-manager.js';
// Global silent mode flag
let silentMode = false;
// Configuration and constants
const CONFIG = {
model: process.env.MODEL || 'claude-3-7-sonnet-20250219',
maxTokens: parseInt(process.env.MAX_TOKENS || '4000'),
temperature: parseFloat(process.env.TEMPERATURE || '0.7'),
debug: process.env.DEBUG === 'true',
logLevel: process.env.LOG_LEVEL || 'info',
defaultSubtasks: parseInt(process.env.DEFAULT_SUBTASKS || '3'),
defaultPriority: process.env.DEFAULT_PRIORITY || 'medium',
projectName: process.env.PROJECT_NAME || 'Task Master',
projectVersion: '1.5.0' // Hardcoded version - ALWAYS use this value, ignore environment variable
};
// --- Environment Variable Resolution Utility ---
/**
 * Look up an environment variable, preferring the local process environment
 * over the MCP session environment.
 * @param {string} varName - Name of the environment variable to resolve.
 * @param {Object|null} [session] - Optional MCP session object that may carry an `env` map.
 * @returns {string|undefined} The resolved value, or undefined when the variable is set in neither place.
 */
function resolveEnvVariable(varName, session) {
	// process.env always wins; the session env is only a fallback.
	const fromProcess = process.env[varName];
	if (fromProcess !== undefined && fromProcess !== null) {
		return fromProcess;
	}
	// Optional chaining guards against a missing session or a session without env.
	return session?.env?.[varName];
}
// --- Project Root Finding Utility ---
/**
 * Walk upwards from a starting directory until a directory containing one of
 * the marker files/directories is found.
 * @param {string} [startPath=process.cwd()] - Directory to begin the search from.
 * @param {string[]} [markers=['package.json', '.git', '.taskmasterconfig']] - Marker names that identify the project root.
 * @returns {string|null} Absolute path of the project root, or null when no marker is found before reaching the filesystem root.
 */
function findProjectRoot(
	startPath = process.cwd(),
	markers = ['package.json', '.git', '.taskmasterconfig']
) {
	let dir = path.resolve(startPath);
	for (;;) {
		// A directory containing any marker is treated as the project root.
		const hasMarker = markers.some((marker) =>
			fs.existsSync(path.join(dir, marker))
		);
		if (hasMarker) {
			return dir;
		}
		const parent = path.dirname(dir);
		// path.dirname is a fixed point at the filesystem root; stop there.
		if (parent === dir) {
			return null;
		}
		dir = parent;
	}
}
// --- Dynamic Configuration Function --- (REMOVED)
/*
function getConfig(session = null) {
// ... implementation removed ...
}
*/
// Set up logging based on log level
const LOG_LEVELS = {
@@ -73,6 +112,9 @@ function log(level, ...args) {
return;
}
// Get log level dynamically from config-manager
const configLevel = getLogLevel() || 'info'; // Use getter
// Use text prefixes instead of emojis
const prefixes = {
debug: chalk.gray('[DEBUG]'),
@@ -84,7 +126,6 @@ function log(level, ...args) {
// Ensure level exists, default to info if not
const currentLevel = LOG_LEVELS.hasOwnProperty(level) ? level : 'info';
const configLevel = CONFIG.logLevel || 'info'; // Ensure configLevel has a default
// Check log level configuration
if (
@@ -106,12 +147,15 @@ function log(level, ...args) {
* @returns {Object|null} Parsed JSON data or null if error occurs
*/
function readJSON(filepath) {
// Get debug flag dynamically from config-manager
const isDebug = getDebugFlag();
try {
const rawData = fs.readFileSync(filepath, 'utf8');
return JSON.parse(rawData);
} catch (error) {
log('error', `Error reading JSON file ${filepath}:`, error.message);
if (CONFIG.debug) {
if (isDebug) {
// Use dynamic debug flag
// Use log utility for debug output too
log('error', 'Full error details:', error);
}
@@ -125,6 +169,8 @@ function readJSON(filepath) {
* @param {Object} data - Data to write
*/
function writeJSON(filepath, data) {
// Get debug flag dynamically from config-manager
const isDebug = getDebugFlag();
try {
const dir = path.dirname(filepath);
if (!fs.existsSync(dir)) {
@@ -133,7 +179,8 @@ function writeJSON(filepath, data) {
fs.writeFileSync(filepath, JSON.stringify(data, null, 2), 'utf8');
} catch (error) {
log('error', `Error writing JSON file ${filepath}:`, error.message);
if (CONFIG.debug) {
if (isDebug) {
// Use dynamic debug flag
// Use log utility for debug output too
log('error', 'Full error details:', error);
}
@@ -156,6 +203,8 @@ function sanitizePrompt(prompt) {
* @returns {Object|null} The parsed complexity report or null if not found
*/
function readComplexityReport(customPath = null) {
// Get debug flag dynamically from config-manager
const isDebug = getDebugFlag();
try {
const reportPath =
customPath ||
@@ -168,6 +217,11 @@ function readComplexityReport(customPath = null) {
return JSON.parse(reportData);
} catch (error) {
log('warn', `Could not read complexity report: ${error.message}`);
// Optionally log full error in debug mode
if (isDebug) {
// Use dynamic debug flag
log('error', 'Full error details:', error);
}
return null;
}
}
@@ -399,7 +453,8 @@ function detectCamelCaseFlags(args) {
// Export all utility functions and configuration
export {
CONFIG,
// CONFIG, <-- Already Removed
// getConfig <-- Removing now
LOG_LEVELS,
log,
readJSON,
@@ -417,5 +472,8 @@ export {
enableSilentMode,
disableSilentMode,
isSilentMode,
getTaskManager
resolveEnvVariable,
getTaskManager,
findProjectRoot
// getConfig <-- Removed
};