From 292dd51417d0a4748db5681d5e6f4ff3af0bc65d Mon Sep 17 00:00:00 2001 From: Eyal Toledano Date: Sun, 20 Apr 2025 01:09:30 -0400 Subject: [PATCH] feat(config): Implement new config system and resolve refactoring errors Introduced config-manager.js and new utilities (resolveEnvVariable, findProjectRoot). Removed old global CONFIG object from utils.js. Updated .taskmasterconfig, mcp.json, and .env.example. Added generateComplexityAnalysisPrompt to ui.js. Removed unused updateSubtaskById from task-manager.js. Resolved SyntaxError and ReferenceError issues across commands.js, ui.js, task-manager.js, and ai-services.js by replacing CONFIG references with config-manager getters (getDebugFlag, getProjectName, getDefaultSubtasks, isApiKeySet). Refactored 'models' command to use getConfig/writeConfig. Simplified version checking. This stabilizes the codebase after initial Task 61 refactoring, fixing CLI errors and enabling subsequent work on Subtasks 61.34 and 61.35. --- .cursor/mcp.json | 16 +- .taskmasterconfig | 44 +- assets/env.example | 18 +- scripts/modules/ai-services-unified.js | 368 ++++++ scripts/modules/ai-services.js | 286 ++--- scripts/modules/commands.js | 186 +-- scripts/modules/config-manager.js | 782 +++++-------- scripts/modules/task-manager.js | 43 +- .../modules/task-manager.js (lines 3036-3084) | 32 - scripts/modules/ui.js | 47 +- scripts/modules/utils.js | 92 +- src/ai-providers/anthropic.js | 191 ++++ src/ai-providers/perplexity.js | 176 +++ tasks/task_061.txt | 1018 ++++++++++++++++- tasks/tasks.json | 269 ++++- tests/unit/ai-services-unified.test.js | 683 +++++++++++ 16 files changed, 3454 insertions(+), 797 deletions(-) create mode 100644 scripts/modules/ai-services-unified.js delete mode 100644 scripts/modules/task-manager.js (lines 3036-3084) create mode 100644 src/ai-providers/anthropic.js create mode 100644 src/ai-providers/perplexity.js create mode 100644 tests/unit/ai-services-unified.test.js diff --git a/.cursor/mcp.json b/.cursor/mcp.json index e5433f19..6fbc619f 100644 --- a/.cursor/mcp.json +++ b/.cursor/mcp.json @@ -4,14 +4,14 @@ "command": "node", "args": ["./mcp-server/server.js"], "env": { - "ANTHROPIC_API_KEY": "YOUR_ANTHROPIC_API_KEY_HERE", - "PERPLEXITY_API_KEY": "YOUR_PERPLEXITY_API_KEY_HERE", - "MODEL": "claude-3-7-sonnet-20250219", - "PERPLEXITY_MODEL": "sonar-pro", - "MAX_TOKENS": 64000, - "TEMPERATURE": 0.2, - "DEFAULT_SUBTASKS": 5, - "DEFAULT_PRIORITY": "medium" + "ANTHROPIC_API_KEY": "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", + "PERPLEXITY_API_KEY": "pplx-1234567890", + "OPENAI_API_KEY": "sk-proj-1234567890", + "GOOGLE_API_KEY": "AIzaSyB1234567890", + "GROK_API_KEY": "gsk_1234567890", + "MISTRAL_API_KEY": "mst_1234567890", + "AZURE_OPENAI_API_KEY": "1234567890", + "AZURE_OPENAI_ENDPOINT": "https://your-endpoint.openai.azure.com/" } } } diff --git a/.taskmasterconfig b/.taskmasterconfig index eff6124d..d797f1fa 100644 --- a/.taskmasterconfig +++ b/.taskmasterconfig @@ -1,16 +1,30 @@ { - "models": { - "main": { - "provider": "google", - "modelId": "gemini-2.5-pro-latest" - }, - "research": { - "provider": "perplexity", - "modelId": "deep-research" - }, - "fallback": { - "provider": "anthropic", - "modelId": "claude-3-7-sonnet-20250219" - } - } -} \ No newline at end of file + "models": { + "main": { + "provider": "google", + "modelId": "gemini-2.5-pro-latest", + "maxTokens": 256000, + "temperature": 0.2 + }, + "research": { + "provider": "perplexity", + "modelId": "sonar-pro", + 
"maxTokens": 8700, + "temperature": 0.1 + }, + "fallback": { + "provider": "anthropic", + "modelId": "claude-3-7-sonnet-20250219", + "maxTokens": 128000, + "temperature": 0.2 + } + }, + "global": { + "logLevel": "info", + "debug": false, + "defaultSubtasks": 5, + "defaultPriority": "medium", + "projectName": "Task Master", + "ollamaBaseUrl": "http://localhost:11434/api" + } +} diff --git a/assets/env.example b/assets/env.example index 551fd49a..f2ce88d6 100644 --- a/assets/env.example +++ b/assets/env.example @@ -1,9 +1,12 @@ -# Required -ANTHROPIC_API_KEY=your-api-key-here # For most AI ops -- Format: sk-ant-api03-... (Required) -PERPLEXITY_API_KEY=pplx-abcde # For research -- Format: pplx-abcde (Optional, Highly Recommended) -OPENAI_API_KEY=sk-proj-... # For OpenAI/OpenRouter models (Optional) -- Format: sk-proj-... -GOOGLE_API_KEY=AIzaSy... # For Google Gemini models (Optional) -GROK_API_KEY=your-grok-api-key-here # For XAI Grok models (Optional) +# API Keys (Required to enable respective provider) +ANTHROPIC_API_KEY=your_anthropic_api_key_here # Required: Format: sk-ant-api03-... +PERPLEXITY_API_KEY=your_perplexity_api_key_here # Optional: Format: pplx-... +OPENAI_API_KEY=your_openai_api_key_here # Optional, for OpenAI/OpenRouter models. Format: sk-proj-... +GOOGLE_API_KEY=your_google_api_key_here # Optional, for Google Gemini models. +GROK_API_KEY=your_grok_api_key_here # Optional, for XAI Grok models. +MISTRAL_API_KEY=your_mistral_key_here # Optional, for Mistral AI models. +AZURE_OPENAI_API_KEY=your_azure_key_here # Optional, for Azure OpenAI models. +AZURE_OPENAI_ENDPOINT=your_azure_endpoint_here # Optional, for Azure OpenAI. # Optional - defaults shown MODEL=claude-3-7-sonnet-20250219 # Recommended models: claude-3-7-sonnet-20250219, claude-3-opus-20240229 (Required) @@ -14,4 +17,5 @@ DEBUG=false # Enable debug logging (true/false) LOG_LEVEL=info # Log level (debug, info, warn, error) DEFAULT_SUBTASKS=5 # Default number of subtasks when expanding DEFAULT_PRIORITY=medium # Default priority for generated tasks (high, medium, low) -PROJECT_NAME={{projectName}} # Project name for tasks.json metadata \ No newline at end of file +PROJECT_NAME={{projectName}} # Project name for tasks.json metadata +OLLAMA_BASE_URL=http://localhost:11434/api # Base URL for local Ollama instance (Optional) \ No newline at end of file diff --git a/scripts/modules/ai-services-unified.js b/scripts/modules/ai-services-unified.js new file mode 100644 index 00000000..a701fe7d --- /dev/null +++ b/scripts/modules/ai-services-unified.js @@ -0,0 +1,368 @@ +/** + * ai-services-unified.js + * Centralized AI service layer using ai-client-factory and AI SDK core functions. 
+ */ + +import { generateText } from 'ai'; +import { getClient } from './ai-client-factory.js'; +import { log } from './utils.js'; // Import log for retry logging +// Import logger from utils later when needed +// import { log } from './utils.js'; + +// --- Configuration for Retries --- +const MAX_RETRIES = 2; // Total attempts = 1 + MAX_RETRIES +const INITIAL_RETRY_DELAY_MS = 1000; // 1 second + +// Helper function to check if an error is retryable +function isRetryableError(error) { + const errorMessage = error.message?.toLowerCase() || ''; + // Add common retryable error patterns + return ( + errorMessage.includes('rate limit') || + errorMessage.includes('overloaded') || + errorMessage.includes('service temporarily unavailable') || + errorMessage.includes('timeout') || + errorMessage.includes('network error') || + // Add specific status codes if available from the SDK errors + error.status === 429 || // Too Many Requests + error.status >= 500 // Server-side errors + ); +} + +/** + * Internal helper to attempt an AI SDK API call with retries. + * + * @param {object} client - The AI client instance. + * @param {function} apiCallFn - The AI SDK function to call (e.g., generateText). + * @param {object} apiParams - Parameters for the AI SDK function (excluding model). + * @param {string} attemptRole - The role being attempted (for logging). + * @returns {Promise} The result from the successful API call. + * @throws {Error} If the call fails after all retries. + */ +async function _attemptApiCallWithRetries( + client, + apiCallFn, + apiParams, + attemptRole +) { + let retries = 0; + while (retries <= MAX_RETRIES) { + try { + log( + 'info', + `Attempt ${retries + 1}/${MAX_RETRIES + 1} calling ${apiCallFn.name} for role ${attemptRole}` + ); + // Call the provided AI SDK function (generateText, streamText, etc.) + const result = await apiCallFn({ model: client, ...apiParams }); + log( + 'info', + `${apiCallFn.name} succeeded for role ${attemptRole} on attempt ${retries + 1}` + ); + return result; // Success! + } catch (error) { + log( + 'warn', + `Attempt ${retries + 1} failed for role ${attemptRole} (${apiCallFn.name}): ${error.message}` + ); + + if (isRetryableError(error) && retries < MAX_RETRIES) { + retries++; + const delay = INITIAL_RETRY_DELAY_MS * Math.pow(2, retries - 1); + log( + 'info', + `Retryable error detected. Retrying in ${delay / 1000}s...` + ); + await new Promise((resolve) => setTimeout(resolve, delay)); + } else { + log( + 'error', + `Non-retryable error or max retries reached for role ${attemptRole} (${apiCallFn.name}).` + ); + throw error; // Final failure for this attempt chain + } + } + } + // Should theoretically not be reached due to throw in the else block, but needed for linting/type safety + throw new Error( + `Exhausted all retries for role ${attemptRole} (${apiCallFn.name})` + ); +} + +/** + * Unified service function for generating text. + * Handles client retrieval, retries, and fallback (main -> fallback -> research). + * TODO: Add detailed logging. + * + * @param {object} params - Parameters for the service call. + * @param {string} params.role - The initial client role ('main', 'research', 'fallback'). + * @param {object} [params.session=null] - Optional MCP session object. + * @param {object} [params.overrideOptions={}] - Optional overrides for ai-client-factory { provider, modelId }. + * @param {string} params.prompt - The prompt for the AI. + * @param {number} [params.maxTokens] - Max tokens for the generation. 
+ * @param {number} [params.temperature] - Temperature setting. + * // ... include other standard generateText options as needed ... + * @returns {Promise} The result from the AI SDK's generateText function. + */ +async function generateTextService(params) { + const { + role: initialRole, + session, + overrideOptions, + ...generateTextParams + } = params; + log('info', 'generateTextService called', { role: initialRole }); + + // Determine the sequence explicitly based on the initial role + let sequence; + if (initialRole === 'main') { + sequence = ['main', 'fallback', 'research']; + } else if (initialRole === 'fallback') { + sequence = ['fallback', 'research']; // Try fallback, then research + } else if (initialRole === 'research') { + sequence = ['research', 'fallback']; // Try research, then fallback + } else { + // Default sequence if initialRole is unknown or invalid + log( + 'warn', + `Unknown initial role: ${initialRole}. Defaulting to main -> fallback -> research sequence.` + ); + sequence = ['main', 'fallback', 'research']; + } + + let lastError = null; + + // Iterate through the determined sequence + for (const currentRole of sequence) { + // Removed the complex conditional check, as the sequence is now pre-determined + + log('info', `Attempting service call with role: ${currentRole}`); + let client; + try { + client = await getClient(currentRole, session, overrideOptions); + const clientInfo = { + provider: client?.provider || 'unknown', + model: client?.modelId || client?.model || 'unknown' + }; + log('info', 'Retrieved AI client', clientInfo); + + // Attempt the API call with retries using the helper + const result = await _attemptApiCallWithRetries( + client, + generateText, + generateTextParams, + currentRole + ); + log('info', `generateTextService succeeded using role: ${currentRole}`); // Add success log + return result; // Success! + } catch (error) { + log( + 'error', // Log as error since this role attempt failed + `Service call failed for role ${currentRole}: ${error.message}` + ); + lastError = error; // Store the error to throw if all roles in sequence fail + + // Log the reason for moving to the next role + if (!client) { + log( + 'warn', + `Could not get client for role ${currentRole}, trying next role in sequence...` + ); + } else { + // Error happened during API call after client was retrieved + log( + 'warn', + `Retries exhausted or non-retryable error for role ${currentRole}, trying next role in sequence...` + ); + } + // Continue to the next role in the sequence automatically + } + } + + // If loop completes, all roles in the sequence failed + log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`); + throw ( + lastError || + new Error( + 'AI service call failed for all configured roles in the sequence.' + ) + ); +} + +// TODO: Implement streamTextService, generateObjectService etc. + +/** + * Unified service function for streaming text. + * Handles client retrieval, retries, and fallback sequence. + * + * @param {object} params - Parameters for the service call. + * @param {string} params.role - The initial client role ('main', 'research', 'fallback'). + * @param {object} [params.session=null] - Optional MCP session object. + * @param {object} [params.overrideOptions={}] - Optional overrides for ai-client-factory. + * @param {string} params.prompt - The prompt for the AI. + * // ... include other standard streamText options as needed ... + * @returns {Promise} The result from the AI SDK's streamText function (typically a Streamable object). 
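+ *
+ * Illustrative usage (a sketch only; it assumes `streamText` is imported
+ * from 'ai' alongside `generateText`, and the standard AI SDK stream result
+ * exposing a `textStream` async iterable):
+ *
+ *   const stream = await streamTextService({
+ *     role: 'research',
+ *     prompt: 'List recent approaches to retry/backoff in API clients.'
+ *   });
+ *   for await (const chunk of stream.textStream) {
+ *     process.stdout.write(chunk);
+ *   }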
+ */ +async function streamTextService(params) { + const { + role: initialRole, + session, + overrideOptions, + ...streamTextParams // Collect remaining params for streamText + } = params; + log('info', 'streamTextService called', { role: initialRole }); + + let sequence; + if (initialRole === 'main') { + sequence = ['main', 'fallback', 'research']; + } else if (initialRole === 'fallback') { + sequence = ['fallback', 'research']; + } else if (initialRole === 'research') { + sequence = ['research', 'fallback']; + } else { + log( + 'warn', + `Unknown initial role: ${initialRole}. Defaulting to main -> fallback -> research sequence.` + ); + sequence = ['main', 'fallback', 'research']; + } + + let lastError = null; + + for (const currentRole of sequence) { + log('info', `Attempting service call with role: ${currentRole}`); + let client; + try { + client = await getClient(currentRole, session, overrideOptions); + const clientInfo = { + provider: client?.provider || 'unknown', + model: client?.modelId || client?.model || 'unknown' + }; + log('info', 'Retrieved AI client', clientInfo); + + const result = await _attemptApiCallWithRetries( + client, + streamText, // Pass streamText function + streamTextParams, + currentRole + ); + log('info', `streamTextService succeeded using role: ${currentRole}`); + return result; + } catch (error) { + log( + 'error', + `Service call failed for role ${currentRole}: ${error.message}` + ); + lastError = error; + + if (!client) { + log( + 'warn', + `Could not get client for role ${currentRole}, trying next role in sequence...` + ); + } else { + log( + 'warn', + `Retries exhausted or non-retryable error for role ${currentRole}, trying next role in sequence...` + ); + } + } + } + + log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`); + throw ( + lastError || + new Error( + 'AI service call (streamText) failed for all configured roles in the sequence.' + ) + ); +} + +/** + * Unified service function for generating structured objects. + * Handles client retrieval, retries, and fallback sequence. + * + * @param {object} params - Parameters for the service call. + * @param {string} params.role - The initial client role ('main', 'research', 'fallback'). + * @param {object} [params.session=null] - Optional MCP session object. + * @param {object} [params.overrideOptions={}] - Optional overrides for ai-client-factory. + * @param {z.Schema} params.schema - The Zod schema for the expected object. + * @param {string} params.prompt - The prompt for the AI. + * // ... include other standard generateObject options as needed ... + * @returns {Promise} The result from the AI SDK's generateObject function. + */ +async function generateObjectService(params) { + const { + role: initialRole, + session, + overrideOptions, + ...generateObjectParams // Collect remaining params for generateObject + } = params; + log('info', 'generateObjectService called', { role: initialRole }); + + let sequence; + if (initialRole === 'main') { + sequence = ['main', 'fallback', 'research']; + } else if (initialRole === 'fallback') { + sequence = ['fallback', 'research']; + } else if (initialRole === 'research') { + sequence = ['research', 'fallback']; + } else { + log( + 'warn', + `Unknown initial role: ${initialRole}. 
Defaulting to main -> fallback -> research sequence.` + ); + sequence = ['main', 'fallback', 'research']; + } + + let lastError = null; + + for (const currentRole of sequence) { + log('info', `Attempting service call with role: ${currentRole}`); + let client; + try { + client = await getClient(currentRole, session, overrideOptions); + const clientInfo = { + provider: client?.provider || 'unknown', + model: client?.modelId || client?.model || 'unknown' + }; + log('info', 'Retrieved AI client', clientInfo); + + const result = await _attemptApiCallWithRetries( + client, + generateObject, // Pass generateObject function + generateObjectParams, + currentRole + ); + log('info', `generateObjectService succeeded using role: ${currentRole}`); + return result; + } catch (error) { + log( + 'error', + `Service call failed for role ${currentRole}: ${error.message}` + ); + lastError = error; + + if (!client) { + log( + 'warn', + `Could not get client for role ${currentRole}, trying next role in sequence...` + ); + } else { + log( + 'warn', + `Retries exhausted or non-retryable error for role ${currentRole}, trying next role in sequence...` + ); + } + } + } + + log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`); + throw ( + lastError || + new Error( + 'AI service call (generateObject) failed for all configured roles in the sequence.' + ) + ); +} + +export { generateTextService, streamTextService, generateObjectService }; diff --git a/scripts/modules/ai-services.js b/scripts/modules/ai-services.js index 45c99464..e10e5862 100644 --- a/scripts/modules/ai-services.js +++ b/scripts/modules/ai-services.js @@ -8,9 +8,18 @@ import { Anthropic } from '@anthropic-ai/sdk'; import OpenAI from 'openai'; import dotenv from 'dotenv'; -import { CONFIG, log, sanitizePrompt, isSilentMode } from './utils.js'; +import { log, sanitizePrompt, isSilentMode } from './utils.js'; import { startLoadingIndicator, stopLoadingIndicator } from './ui.js'; import chalk from 'chalk'; +import { + getMainModelId, + getMainMaxTokens, + getMainTemperature, + getDebugFlag, + getResearchModelId, + getResearchMaxTokens, + getResearchTemperature +} from './config-manager.js'; // Load environment variables dotenv.config(); @@ -218,7 +227,7 @@ Important: Your response must be valid JSON only, with no additional explanation prdContent, prdPath, numTasks, - modelConfig?.maxTokens || CONFIG.maxTokens, + modelConfig?.maxTokens || getMainMaxTokens(null), systemPrompt, { reportProgress, mcpLog, session }, aiClient || anthropic, @@ -254,7 +263,7 @@ Important: Your response must be valid JSON only, with no additional explanation ); } else { console.error(chalk.red(userMessage)); - if (CONFIG.debug) { + if (getDebugFlag(null)) { log('debug', 'Full error:', error); } throw new Error(userMessage); @@ -287,54 +296,46 @@ async function handleStreamingRequest( aiClient = null, modelConfig = null ) { - // Determine output format based on mcpLog presence - const outputFormat = mcpLog ? 
'json' : 'text'; - - // Create custom reporter that checks for MCP log and silent mode const report = (message, level = 'info') => { - if (mcpLog) { + if (mcpLog && typeof mcpLog[level] === 'function') { mcpLog[level](message); - } else if (!isSilentMode() && outputFormat === 'text') { - // Only log to console if not in silent mode and outputFormat is 'text' + } else if (!isSilentMode()) { log(level, message); } }; - // Only show loading indicators for text output (CLI) - let loadingIndicator = null; - if (outputFormat === 'text' && !isSilentMode()) { - loadingIndicator = startLoadingIndicator('Generating tasks from PRD...'); + let loadingIndicator; + if (!isSilentMode() && !mcpLog) { + loadingIndicator = startLoadingIndicator('Claude is thinking...'); } - if (reportProgress) { - await reportProgress({ progress: 0 }); - } - let responseText = ''; - let streamingInterval = null; + let textContent = ''; + let finalResponse = null; + let claudeOverloaded = false; try { - // Use streaming for handling large responses - const stream = await (aiClient || anthropic).messages.create({ - model: - modelConfig?.model || session?.env?.ANTHROPIC_MODEL || CONFIG.model, - max_tokens: - modelConfig?.maxTokens || session?.env?.MAX_TOKENS || maxTokens, - temperature: - modelConfig?.temperature || - session?.env?.TEMPERATURE || - CONFIG.temperature, + const modelToUse = modelConfig?.modelId || getMainModelId(null); + const temperatureToUse = + modelConfig?.temperature || getMainTemperature(null); + const clientToUse = aiClient || anthropic; + + report(`Using model: ${modelToUse} with temp: ${temperatureToUse}`); + + const stream = await clientToUse.messages.stream({ + model: modelToUse, + max_tokens: maxTokens, + temperature: temperatureToUse, system: systemPrompt, messages: [ { role: 'user', content: `Here's the Product Requirements Document (PRD) to break down into ${numTasks} tasks:\n\n${prdContent}` } - ], - stream: true + ] }); - // Update loading indicator to show streaming progress - only for text output - if (outputFormat === 'text' && !isSilentMode()) { + let streamingInterval = null; + if (!isSilentMode() && process.stdout.isTTY) { let dotCount = 0; const readline = await import('readline'); streamingInterval = setInterval(() => { @@ -346,64 +347,76 @@ async function handleStreamingRequest( }, 500); } - // Process the stream for await (const chunk of stream) { if (chunk.type === 'content_block_delta' && chunk.delta.text) { - responseText += chunk.delta.text; + textContent += chunk.delta.text; } if (reportProgress) { await reportProgress({ - progress: (responseText.length / maxTokens) * 100 + progress: (textContent.length / maxTokens) * 100 }); } if (mcpLog) { - mcpLog.info(`Progress: ${(responseText.length / maxTokens) * 100}%`); + mcpLog.info(`Progress: ${(textContent.length / maxTokens) * 100}%`); } } if (streamingInterval) clearInterval(streamingInterval); - - // Only call stopLoadingIndicator if we started one - if (loadingIndicator && outputFormat === 'text' && !isSilentMode()) { - stopLoadingIndicator(loadingIndicator); + if (loadingIndicator) { + stopLoadingIndicator( + loadingIndicator, + 'Claude processing finished', + true + ); + loadingIndicator = null; } - report( - `Completed streaming response from ${aiClient ? 
'provided' : 'default'} AI client!`, - 'info' - ); - - // Pass options to processClaudeResponse - return processClaudeResponse( - responseText, + finalResponse = processClaudeResponse( + textContent, numTasks, 0, prdContent, prdPath, { reportProgress, mcpLog, session } ); + + if (claudeOverloaded) { + report('Claude is overloaded, falling back to Perplexity', 'warn'); + const perplexityClient = getPerplexityClient(); + finalResponse = await handleStreamingRequest( + prdContent, + prdPath, + numTasks, + maxTokens, + systemPrompt, + { reportProgress, mcpLog, session }, + perplexityClient, + modelConfig + ); + } + + return finalResponse; } catch (error) { - if (streamingInterval) clearInterval(streamingInterval); - - // Only call stopLoadingIndicator if we started one - if (loadingIndicator && outputFormat === 'text' && !isSilentMode()) { - stopLoadingIndicator(loadingIndicator); + if (loadingIndicator) { + stopLoadingIndicator(loadingIndicator, 'Claude stream failed', false); + loadingIndicator = null; } - // Get user-friendly error message + if (error.error?.type === 'overloaded_error') { + claudeOverloaded = true; + } const userMessage = handleClaudeError(error); - report(`Error: ${userMessage}`, 'error'); + report(userMessage, 'error'); - // Only show console error for text output (CLI) - if (outputFormat === 'text' && !isSilentMode()) { - console.error(chalk.red(userMessage)); + throw error; + } finally { + if (loadingIndicator) { + const success = !!finalResponse; + const message = success + ? 'Claude stream finished' + : 'Claude stream ended'; + stopLoadingIndicator(loadingIndicator, message, success); } - - if (CONFIG.debug && outputFormat === 'text' && !isSilentMode()) { - log('debug', 'Full error:', error); - } - - throw new Error(userMessage); } } @@ -528,18 +541,27 @@ async function generateSubtasks( additionalContext = '', { reportProgress, mcpLog, session } = {} ) { + log('info', `Generating ${numSubtasks} subtasks for Task ${task.id}...`); + const report = (message, level = 'info') => { + if (mcpLog && typeof mcpLog[level] === 'function') { + mcpLog[level](message); + } else if (!isSilentMode()) { + log(level, message); + } + }; + + let loadingIndicator; + if (!isSilentMode() && !mcpLog) { + loadingIndicator = startLoadingIndicator( + 'Claude is generating subtasks...' + ); + } + + const model = getMainModelId(null); + const maxTokens = getMainMaxTokens(null); + const temperature = getMainTemperature(null); + try { - log( - 'info', - `Generating ${numSubtasks} subtasks for task ${task.id}: ${task.title}` - ); - - const loadingIndicator = startLoadingIndicator( - `Generating subtasks for task ${task.id}...` - ); - let streamingInterval = null; - let responseText = ''; - const systemPrompt = `You are an AI assistant helping with task breakdown for software development. You need to break down a high-level task into ${numSubtasks} specific subtasks that can be implemented one by one. @@ -585,72 +607,62 @@ Return exactly ${numSubtasks} subtasks with the following JSON structure: Note on dependencies: Subtasks can depend on other subtasks with lower IDs. 
Use an empty array if there are no dependencies.`; - try { - // Update loading indicator to show streaming progress - // Only create interval if not silent and stdout is a TTY - if (!isSilentMode() && process.stdout.isTTY) { - let dotCount = 0; - const readline = await import('readline'); - streamingInterval = setInterval(() => { - readline.cursorTo(process.stdout, 0); - process.stdout.write( - `Generating subtasks for task ${task.id}${'.'.repeat(dotCount)}` - ); - dotCount = (dotCount + 1) % 4; - }, 500); - } - - // TODO: MOVE THIS TO THE STREAM REQUEST FUNCTION (DRY) - - // Use streaming API call - const stream = await anthropic.messages.create({ - model: session?.env?.ANTHROPIC_MODEL || CONFIG.model, - max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens, - temperature: session?.env?.TEMPERATURE || CONFIG.temperature, - system: systemPrompt, - messages: [ - { - role: 'user', - content: userPrompt - } - ], - stream: true - }); - - // Process the stream - for await (const chunk of stream) { - if (chunk.type === 'content_block_delta' && chunk.delta.text) { - responseText += chunk.delta.text; + const stream = await anthropic.messages.create({ + model: model, + max_tokens: maxTokens, + temperature: temperature, + system: systemPrompt, + messages: [ + { + role: 'user', + content: userPrompt } - if (reportProgress) { - await reportProgress({ - progress: (responseText.length / CONFIG.maxTokens) * 100 - }); - } - if (mcpLog) { - mcpLog.info( - `Progress: ${(responseText.length / CONFIG.maxTokens) * 100}%` - ); - } - } + ], + stream: true + }); - if (streamingInterval) clearInterval(streamingInterval); - stopLoadingIndicator(loadingIndicator); + let responseText = ''; + let streamingInterval = null; - log('info', `Completed generating subtasks for task ${task.id}`); - - return parseSubtasksFromText( - responseText, - nextSubtaskId, - numSubtasks, - task.id - ); - } catch (error) { - if (streamingInterval) clearInterval(streamingInterval); - stopLoadingIndicator(loadingIndicator); - throw error; + if (!isSilentMode() && process.stdout.isTTY) { + let dotCount = 0; + const readline = await import('readline'); + streamingInterval = setInterval(() => { + readline.cursorTo(process.stdout, 0); + process.stdout.write( + `Generating subtasks for task ${task.id}${'.'.repeat(dotCount)}` + ); + dotCount = (dotCount + 1) % 4; + }, 500); } + + for await (const chunk of stream) { + if (chunk.type === 'content_block_delta' && chunk.delta.text) { + responseText += chunk.delta.text; + } + if (reportProgress) { + await reportProgress({ + progress: (responseText.length / maxTokens) * 100 + }); + } + if (mcpLog) { + mcpLog.info(`Progress: ${(responseText.length / maxTokens) * 100}%`); + } + } + + if (streamingInterval) clearInterval(streamingInterval); + if (loadingIndicator) stopLoadingIndicator(loadingIndicator); + + log('info', `Completed generating subtasks for task ${task.id}`); + + return parseSubtasksFromText( + responseText, + nextSubtaskId, + numSubtasks, + task.id + ); } catch (error) { + if (loadingIndicator) stopLoadingIndicator(loadingIndicator); log('error', `Error generating subtasks: ${error.message}`); throw error; } diff --git a/scripts/modules/commands.js b/scripts/modules/commands.js index d62e626d..be0858aa 100644 --- a/scripts/modules/commands.js +++ b/scripts/modules/commands.js @@ -13,7 +13,7 @@ import inquirer from 'inquirer'; import ora from 'ora'; import Table from 'cli-table3'; -import { CONFIG, log, readJSON, writeJSON } from './utils.js'; +import { log, readJSON, writeJSON } from 
'./utils.js'; import { parsePRD, updateTasks, @@ -45,16 +45,16 @@ import { getMainModelId, getResearchModelId, getFallbackModelId, - setMainModel, - setResearchModel, - setFallbackModel, getAvailableModels, VALID_PROVIDERS, getMainProvider, getResearchProvider, getFallbackProvider, - hasApiKeyForProvider, - getMcpApiKeyStatus + isApiKeySet, + getMcpApiKeyStatus, + getDebugFlag, + getConfig, + writeConfig } from './config-manager.js'; import { @@ -399,7 +399,8 @@ function registerCommands(programInstance) { ); } - if (CONFIG.debug) { + // Use getDebugFlag getter instead of CONFIG.debug + if (getDebugFlag(null)) { console.error(error); } @@ -554,7 +555,8 @@ function registerCommands(programInstance) { ); } - if (CONFIG.debug) { + // Use getDebugFlag getter instead of CONFIG.debug + if (getDebugFlag(null)) { console.error(error); } @@ -640,8 +642,8 @@ function registerCommands(programInstance) { .option('-a, --all', 'Expand all tasks') .option( '-n, --num ', - 'Number of subtasks to generate', - CONFIG.defaultSubtasks.toString() + 'Number of subtasks to generate (default from config)', + '5' // Set a simple string default here ) .option( '--research', @@ -657,7 +659,11 @@ function registerCommands(programInstance) { ) .action(async (options) => { const idArg = options.id; - const numSubtasks = options.num || CONFIG.defaultSubtasks; + // Get the actual default if the user didn't provide --num + const numSubtasks = + options.num === '5' + ? getDefaultSubtasks(null) + : parseInt(options.num, 10); const useResearch = options.research || false; const additionalContext = options.prompt || ''; const forceFlag = options.force || false; @@ -917,7 +923,7 @@ function registerCommands(programInstance) { console.log(chalk.gray('Next: Complete this task or add more tasks')); } catch (error) { console.error(chalk.red(`Error adding task: ${error.message}`)); - if (error.stack && CONFIG.debug) { + if (error.stack && getDebugFlag(null)) { console.error(error.stack); } process.exit(1); @@ -1583,13 +1589,13 @@ function registerCommands(programInstance) { ) .option('--setup', 'Run interactive setup to configure models') .action(async (options) => { - let modelSetAction = false; // Track if any set action was performed + let configModified = false; // Track if config needs saving const availableModels = getAvailableModels(); // Get available models once + const currentConfig = getConfig(); // Load current config once // Helper to find provider for a given model ID - const findProvider = (modelId) => { - const modelInfo = availableModels.find((m) => m.id === modelId); - return modelInfo?.provider; + const findModelData = (modelId) => { + return availableModels.find((m) => m.id === modelId); }; try { @@ -1601,27 +1607,27 @@ function registerCommands(programInstance) { ); process.exit(1); } - const provider = findProvider(modelId); - if (!provider) { + const modelData = findModelData(modelId); + if (!modelData || !modelData.provider) { console.error( chalk.red( - `Error: Model ID "${modelId}" not found in available models.` + `Error: Model ID "${modelId}" not found or invalid in available models.` ) ); process.exit(1); } - if (setMainModel(provider, modelId)) { - // Call specific setter - console.log( - chalk.green( - `Main model set to: ${modelId} (Provider: ${provider})` - ) - ); - modelSetAction = true; - } else { - console.error(chalk.red(`Failed to set main model.`)); - process.exit(1); - } + // Update the loaded config object + currentConfig.models.main = { + ...currentConfig.models.main, // Keep existing 
params like maxTokens + provider: modelData.provider, + modelId: modelId + }; + console.log( + chalk.blue( + `Preparing to set main model to: ${modelId} (Provider: ${modelData.provider})` + ) + ); + configModified = true; } if (options.setResearch) { @@ -1632,27 +1638,27 @@ function registerCommands(programInstance) { ); process.exit(1); } - const provider = findProvider(modelId); - if (!provider) { + const modelData = findModelData(modelId); + if (!modelData || !modelData.provider) { console.error( chalk.red( - `Error: Model ID "${modelId}" not found in available models.` + `Error: Model ID "${modelId}" not found or invalid in available models.` ) ); process.exit(1); } - if (setResearchModel(provider, modelId)) { - // Call specific setter - console.log( - chalk.green( - `Research model set to: ${modelId} (Provider: ${provider})` - ) - ); - modelSetAction = true; - } else { - console.error(chalk.red(`Failed to set research model.`)); - process.exit(1); - } + // Update the loaded config object + currentConfig.models.research = { + ...currentConfig.models.research, // Keep existing params like maxTokens + provider: modelData.provider, + modelId: modelId + }; + console.log( + chalk.blue( + `Preparing to set research model to: ${modelId} (Provider: ${modelData.provider})` + ) + ); + configModified = true; } if (options.setFallback) { @@ -1663,30 +1669,49 @@ function registerCommands(programInstance) { ); process.exit(1); } - const provider = findProvider(modelId); - if (!provider) { + const modelData = findModelData(modelId); + if (!modelData || !modelData.provider) { console.error( chalk.red( - `Error: Model ID "${modelId}" not found in available models.` + `Error: Model ID "${modelId}" not found or invalid in available models.` ) ); process.exit(1); } - if (setFallbackModel(provider, modelId)) { - // Call specific setter - console.log( - chalk.green( - `Fallback model set to: ${modelId} (Provider: ${provider})` - ) - ); - modelSetAction = true; - } else { - console.error(chalk.red(`Failed to set fallback model.`)); - process.exit(1); - } + // Update the loaded config object + currentConfig.models.fallback = { + ...currentConfig.models.fallback, // Keep existing params like maxTokens + provider: modelData.provider, + modelId: modelId + }; + console.log( + chalk.blue( + `Preparing to set fallback model to: ${modelId} (Provider: ${modelData.provider})` + ) + ); + configModified = true; } - // Handle interactive setup first + // If any config was modified, write it back to the file + if (configModified) { + if (writeConfig(currentConfig)) { + console.log( + chalk.green( + 'Configuration successfully updated in .taskmasterconfig' + ) + ); + } else { + console.error( + chalk.red( + 'Error writing updated configuration to .taskmasterconfig' + ) + ); + process.exit(1); + } + return; // Exit after successful set operation + } + + // Handle interactive setup first (Keep existing setup logic) if (options.setup) { console.log(chalk.cyan.bold('\nInteractive Model Setup:')); @@ -1817,8 +1842,8 @@ function registerCommands(programInstance) { return; // Exit after setup } - // If no set flags were used and not in setup mode, list the models - if (!modelSetAction && !options.setup) { + // If no set flags were used and not in setup mode, list the models (Keep existing list logic) + if (!configModified && !options.setup) { // Fetch current settings const mainProvider = getMainProvider(); const mainModelId = getMainModelId(); @@ -1828,12 +1853,12 @@ function registerCommands(programInstance) { const 
fallbackModelId = getFallbackModelId(); // May be undefined // Check API keys for both CLI (.env) and MCP (mcp.json) - const mainCliKeyOk = hasApiKeyForProvider(mainProvider); + const mainCliKeyOk = isApiKeySet(mainProvider); // <-- Use correct function name const mainMcpKeyOk = getMcpApiKeyStatus(mainProvider); - const researchCliKeyOk = hasApiKeyForProvider(researchProvider); + const researchCliKeyOk = isApiKeySet(researchProvider); // <-- Use correct function name const researchMcpKeyOk = getMcpApiKeyStatus(researchProvider); const fallbackCliKeyOk = fallbackProvider - ? hasApiKeyForProvider(fallbackProvider) + ? isApiKeySet(fallbackProvider) // <-- Use correct function name : true; // No key needed if no fallback is set const fallbackMcpKeyOk = fallbackProvider ? getMcpApiKeyStatus(fallbackProvider) @@ -2080,7 +2105,7 @@ function registerCommands(programInstance) { } } catch (error) { log(`Error processing models command: ${error.message}`, 'error'); - if (error.stack && CONFIG.debug) { + if (error.stack && getDebugFlag(null)) { log(error.stack, 'debug'); } process.exit(1); @@ -2100,7 +2125,7 @@ function setupCLI() { .name('dev') .description('AI-driven development task management') .version(() => { - // Read version directly from package.json + // Read version directly from package.json ONLY try { const packageJsonPath = path.join(process.cwd(), 'package.json'); if (fs.existsSync(packageJsonPath)) { @@ -2110,9 +2135,13 @@ function setupCLI() { return packageJson.version; } } catch (error) { - // Silently fall back to default version + // Silently fall back to 'unknown' + log( + 'warn', + 'Could not read package.json for version info in .version()' + ); } - return CONFIG.projectVersion; // Default fallback + return 'unknown'; // Default fallback if package.json fails }) .helpOption('-h, --help', 'Display help') .addHelpCommand(false) // Disable default help command @@ -2141,16 +2170,21 @@ function setupCLI() { * @returns {Promise<{currentVersion: string, latestVersion: string, needsUpdate: boolean}>} */ async function checkForUpdate() { - // Get current version from package.json - let currentVersion = CONFIG.projectVersion; + // Get current version from package.json ONLY + let currentVersion = 'unknown'; // Initialize with a default try { - // Try to get the version from the installed package - const packageJsonPath = path.join( + // Try to get the version from the installed package (if applicable) or current dir + let packageJsonPath = path.join( process.cwd(), 'node_modules', 'task-master-ai', 'package.json' ); + // Fallback to current directory package.json if not found in node_modules + if (!fs.existsSync(packageJsonPath)) { + packageJsonPath = path.join(process.cwd(), 'package.json'); + } + if (fs.existsSync(packageJsonPath)) { const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8')); currentVersion = packageJson.version; @@ -2303,7 +2337,7 @@ async function runCLI(argv = process.argv) { } catch (error) { console.error(chalk.red(`Error: ${error.message}`)); - if (CONFIG.debug) { + if (getDebugFlag(null)) { console.error(error); } diff --git a/scripts/modules/config-manager.js b/scripts/modules/config-manager.js index 867f33f0..edb42d9d 100644 --- a/scripts/modules/config-manager.js +++ b/scripts/modules/config-manager.js @@ -2,6 +2,14 @@ import fs from 'fs'; import path from 'path'; import chalk from 'chalk'; import { fileURLToPath } from 'url'; +import { ZodError } from 'zod'; +import { + log, + readJSON, + writeJSON, + resolveEnvVariable, + findProjectRoot +} 
from './utils.js'; // Calculate __dirname in ESM const __filename = fileURLToPath(import.meta.url); @@ -28,63 +36,49 @@ try { const CONFIG_FILE_NAME = '.taskmasterconfig'; -// Default configuration -const DEFAULT_MAIN_PROVIDER = 'anthropic'; -const DEFAULT_MAIN_MODEL_ID = 'claude-3.7-sonnet-20250219'; -const DEFAULT_RESEARCH_PROVIDER = 'perplexity'; -const DEFAULT_RESEARCH_MODEL_ID = 'sonar-pro'; +// Define valid providers dynamically from the loaded MODEL_MAP +const VALID_PROVIDERS = Object.keys(MODEL_MAP); -// Define ONE list of all supported providers -const VALID_PROVIDERS = [ - 'anthropic', - 'openai', - 'google', - 'perplexity', - 'ollama', - 'openrouter', - 'grok' -]; - -let projectRoot = null; - -function findProjectRoot() { - // Keep this function as is for CLI context - if (projectRoot) return projectRoot; - - let currentDir = process.cwd(); - while (currentDir !== path.parse(currentDir).root) { - if (fs.existsSync(path.join(currentDir, 'package.json'))) { - projectRoot = currentDir; - return projectRoot; +// Default configuration values (used if .taskmasterconfig is missing or incomplete) +const DEFAULTS = { + models: { + main: { + provider: 'anthropic', + modelId: 'claude-3-7-sonnet-20250219', + maxTokens: 64000, + temperature: 0.2 + }, + research: { + provider: 'perplexity', + modelId: 'sonar-pro', + maxTokens: 8700, + temperature: 0.1 + }, + fallback: { + // No default fallback provider/model initially + provider: 'anthropic', + modelId: 'claude-3-5-sonnet', + maxTokens: 64000, // Default parameters if fallback IS configured + temperature: 0.2 } - currentDir = path.dirname(currentDir); + }, + global: { + logLevel: 'info', + debug: false, + defaultSubtasks: 5, + defaultPriority: 'medium', + projectName: 'Task Master', + ollamaBaseUrl: 'http://localhost:11434/api' } +}; - // Check root directory as a last resort - if (fs.existsSync(path.join(currentDir, 'package.json'))) { - projectRoot = currentDir; - return projectRoot; - } +// --- Internal Config Loading --- +let loadedConfig = null; // Cache for loaded config - // If still not found, maybe look for other markers or return null - // For now, returning null if package.json isn't found up to the root - projectRoot = null; - return null; -} - -function readConfig(explicitRoot = null) { +function _loadAndValidateConfig(explicitRoot = null) { // Determine the root path to use const rootToUse = explicitRoot || findProjectRoot(); - - const defaults = { - models: { - main: { provider: DEFAULT_MAIN_PROVIDER, modelId: DEFAULT_MAIN_MODEL_ID }, - research: { - provider: DEFAULT_RESEARCH_PROVIDER, - modelId: DEFAULT_RESEARCH_MODEL_ID - } - } - }; + const defaults = DEFAULTS; // Use the defined defaults if (!rootToUse) { console.warn( @@ -101,75 +95,60 @@ function readConfig(explicitRoot = null) { const rawData = fs.readFileSync(configPath, 'utf-8'); const parsedConfig = JSON.parse(rawData); - // Deep merge defaults to ensure structure and handle partial configs + // Deep merge with defaults const config = { models: { - main: { - provider: - parsedConfig?.models?.main?.provider ?? - defaults.models.main.provider, - modelId: - parsedConfig?.models?.main?.modelId ?? - defaults.models.main.modelId - }, + main: { ...defaults.models.main, ...parsedConfig?.models?.main }, research: { - provider: - parsedConfig?.models?.research?.provider ?? - defaults.models.research.provider, - modelId: - parsedConfig?.models?.research?.modelId ?? 
- defaults.models.research.modelId + ...defaults.models.research, + ...parsedConfig?.models?.research }, - // Add merge logic for the fallback model - fallback: { - provider: parsedConfig?.models?.fallback?.provider, - modelId: parsedConfig?.models?.fallback?.modelId - } - } + // Fallback needs careful merging - only merge if provider/model exist + fallback: + parsedConfig?.models?.fallback?.provider && + parsedConfig?.models?.fallback?.modelId + ? { ...defaults.models.fallback, ...parsedConfig.models.fallback } + : { ...defaults.models.fallback } // Use default params even if provider/model missing + }, + global: { ...defaults.global, ...parsedConfig?.global } }; - // Validate loaded providers (main, research, and fallback if it exists) + // --- Validation --- + // Validate main provider/model if (!validateProvider(config.models.main.provider)) { console.warn( chalk.yellow( `Warning: Invalid main provider "${config.models.main.provider}" in ${CONFIG_FILE_NAME}. Falling back to default.` ) ); - config.models.main = { - provider: defaults.models.main.provider, - modelId: defaults.models.main.modelId - }; + config.models.main = { ...defaults.models.main }; } - // Optional: Add warning for model combination if desired, but don't block - // else if (!validateProviderModelCombination(config.models.main.provider, config.models.main.modelId)) { ... } + // Optional: Add warning for model combination if desired + // Validate research provider/model if (!validateProvider(config.models.research.provider)) { console.warn( chalk.yellow( `Warning: Invalid research provider "${config.models.research.provider}" in ${CONFIG_FILE_NAME}. Falling back to default.` ) ); - config.models.research = { - provider: defaults.models.research.provider, - modelId: defaults.models.research.modelId - }; + config.models.research = { ...defaults.models.research }; } - // Optional: Add warning for model combination if desired, but don't block - // else if (!validateProviderModelCombination(config.models.research.provider, config.models.research.modelId)) { ... } + // Optional: Add warning for model combination if desired - // Add validation for fallback provider if it exists + // Validate fallback provider if it exists if ( - config.models.fallback && - config.models.fallback.provider && + config.models.fallback?.provider && !validateProvider(config.models.fallback.provider) ) { console.warn( chalk.yellow( - `Warning: Invalid fallback provider "${config.models.fallback.provider}" in ${CONFIG_FILE_NAME}. Fallback model will be ignored.` + `Warning: Invalid fallback provider "${config.models.fallback.provider}" in ${CONFIG_FILE_NAME}. Fallback model configuration will be ignored.` ) ); - // Unlike main/research, we don't set a default fallback, just ignore it - delete config.models.fallback; + // Clear invalid fallback provider/model, but keep default params if needed elsewhere + config.models.fallback.provider = undefined; + config.models.fallback.modelId = undefined; } return config; @@ -182,10 +161,28 @@ function readConfig(explicitRoot = null) { return defaults; } } else { + // Config file doesn't exist, use defaults return defaults; } } +/** + * Gets the current configuration, loading it if necessary. + * @param {string|null} explicitRoot - Optional explicit path to the project root. + * @param {boolean} forceReload - Force reloading the config file. + * @returns {object} The loaded configuration object. 
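+ *
+ * Illustrative usage (a sketch only; the getter names refer to functions
+ * defined below in this module):
+ *
+ *   const config = getConfig();            // cached after the first load
+ *   const mainModelId = getMainModelId();  // e.g. 'claude-3-7-sonnet-20250219'
+ *   const subtasks = getDefaultSubtasks(); // falls back to the DEFAULTS block
+ *   if (getDebugFlag()) {
+ *     console.log(JSON.stringify(config, null, 2));
+ *   }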
+ */ +function getConfig(explicitRoot = null, forceReload = false) { + if (!loadedConfig || forceReload) { + loadedConfig = _loadAndValidateConfig(explicitRoot); + } + // If an explicitRoot was provided for a one-off check, don't cache it permanently + if (explicitRoot && !forceReload) { + return _loadAndValidateConfig(explicitRoot); + } + return loadedConfig; +} + /** * Validates if a provider name is in the list of supported providers. * @param {string} providerName The name of the provider. @@ -215,402 +212,134 @@ function validateProviderModelCombination(providerName, modelId) { ); } -/** - * Gets the currently configured main AI provider. - * @param {string|null} explicitRoot - Optional explicit path to the project root. - * @returns {string} The name of the main provider. - */ +// --- Role-Specific Getters --- + +function getModelConfigForRole(role, explicitRoot = null) { + const config = getConfig(explicitRoot); + const roleConfig = config?.models?.[role]; + if (!roleConfig) { + log('warn', `No model configuration found for role: ${role}`); + return DEFAULTS.models[role] || {}; // Fallback to default for the role + } + return roleConfig; +} + function getMainProvider(explicitRoot = null) { - const config = readConfig(explicitRoot); - return config.models.main.provider; + return getModelConfigForRole('main', explicitRoot).provider; } -/** - * Gets the currently configured main AI model ID. - * @param {string|null} explicitRoot - Optional explicit path to the project root. - * @returns {string} The ID of the main model. - */ function getMainModelId(explicitRoot = null) { - const config = readConfig(explicitRoot); - return config.models.main.modelId; + return getModelConfigForRole('main', explicitRoot).modelId; +} + +function getMainMaxTokens(explicitRoot = null) { + return getModelConfigForRole('main', explicitRoot).maxTokens; +} + +function getMainTemperature(explicitRoot = null) { + return getModelConfigForRole('main', explicitRoot).temperature; } -/** - * Gets the currently configured research AI provider. - * @param {string|null} explicitRoot - Optional explicit path to the project root. - * @returns {string} The name of the research provider. - */ function getResearchProvider(explicitRoot = null) { - const config = readConfig(explicitRoot); - return config.models.research.provider; + return getModelConfigForRole('research', explicitRoot).provider; } -/** - * Gets the currently configured research AI model ID. - * @param {string|null} explicitRoot - Optional explicit path to the project root. - * @returns {string} The ID of the research model. - */ function getResearchModelId(explicitRoot = null) { - const config = readConfig(explicitRoot); - return config.models.research.modelId; + return getModelConfigForRole('research', explicitRoot).modelId; +} + +function getResearchMaxTokens(explicitRoot = null) { + return getModelConfigForRole('research', explicitRoot).maxTokens; +} + +function getResearchTemperature(explicitRoot = null) { + return getModelConfigForRole('research', explicitRoot).temperature; } -/** - * Gets the currently configured fallback AI provider. - * @param {string|null} explicitRoot - Optional explicit path to the project root. - * @returns {string|undefined} The name of the fallback provider, or undefined if not set. 
- */ function getFallbackProvider(explicitRoot = null) { - const config = readConfig(explicitRoot); - return config.models?.fallback?.provider; + // Specifically check if provider is set, as fallback is optional + return getModelConfigForRole('fallback', explicitRoot).provider || undefined; } -/** - * Gets the currently configured fallback AI model ID. - * @param {string|null} explicitRoot - Optional explicit path to the project root. - * @returns {string|undefined} The ID of the fallback model, or undefined if not set. - */ function getFallbackModelId(explicitRoot = null) { - const config = readConfig(explicitRoot); - return config.models?.fallback?.modelId; + // Specifically check if modelId is set + return getModelConfigForRole('fallback', explicitRoot).modelId || undefined; +} + +function getFallbackMaxTokens(explicitRoot = null) { + // Return fallback tokens even if provider/model isn't set, in case it's needed generically + return getModelConfigForRole('fallback', explicitRoot).maxTokens; +} + +function getFallbackTemperature(explicitRoot = null) { + // Return fallback temp even if provider/model isn't set + return getModelConfigForRole('fallback', explicitRoot).temperature; +} + +// --- Global Settings Getters --- + +function getGlobalConfig(explicitRoot = null) { + const config = getConfig(explicitRoot); + return config?.global || DEFAULTS.global; +} + +function getLogLevel(explicitRoot = null) { + return getGlobalConfig(explicitRoot).logLevel; +} + +function getDebugFlag(explicitRoot = null) { + // Ensure boolean type + return getGlobalConfig(explicitRoot).debug === true; +} + +function getDefaultSubtasks(explicitRoot = null) { + // Ensure integer type + return parseInt(getGlobalConfig(explicitRoot).defaultSubtasks, 10); +} + +function getDefaultPriority(explicitRoot = null) { + return getGlobalConfig(explicitRoot).defaultPriority; +} + +function getProjectName(explicitRoot = null) { + return getGlobalConfig(explicitRoot).projectName; +} + +function getOllamaBaseUrl(explicitRoot = null) { + return getGlobalConfig(explicitRoot).ollamaBaseUrl; } /** - * Sets the main AI model (provider and modelId) in the configuration file. - * @param {string} providerName The name of the provider to set. - * @param {string} modelId The ID of the model to set. - * @param {string|null} explicitRoot - Optional explicit path to the project root. - * @returns {boolean} True if successful, false otherwise. + * Checks if the API key for a given provider is set in the environment. + * Checks process.env first, then session.env if session is provided. + * @param {string} providerName - The name of the provider (e.g., 'openai', 'anthropic'). + * @param {object|null} [session=null] - The MCP session object (optional). + * @returns {boolean} True if the API key is set, false otherwise. */ -function setMainModel(providerName, modelId, explicitRoot = null) { - // --- 1. 
Validate Provider First --- - if (!validateProvider(providerName)) { - console.error( - chalk.red(`Error: "${providerName}" is not a valid provider.`) - ); - console.log( - chalk.yellow(`Available providers: ${VALID_PROVIDERS.join(', ')}`) - ); +function isApiKeySet(providerName, session = null) { + // Define the expected environment variable name for each provider + const keyMap = { + openai: 'OPENAI_API_KEY', + anthropic: 'ANTHROPIC_API_KEY', + google: 'GOOGLE_API_KEY', + perplexity: 'PERPLEXITY_API_KEY', + grok: 'GROK_API_KEY', // Assuming GROK_API_KEY based on env.example + mistral: 'MISTRAL_API_KEY', + azure: 'AZURE_OPENAI_API_KEY', // Azure needs endpoint too, but key presence is a start + openrouter: 'OPENROUTER_API_KEY', + xai: 'XAI_API_KEY' + // Add other providers as needed + }; + + const providerKey = providerName?.toLowerCase(); + if (!providerKey || !keyMap[providerKey]) { + log('warn', `Unknown provider name: ${providerName} in isApiKeySet check.`); return false; } - // --- 2. Validate Role Second --- - const allModels = getAvailableModels(); // Get all models to check roles - const modelData = allModels.find( - (m) => m.id === modelId && m.provider === providerName - ); - - if ( - !modelData || - !modelData.allowed_roles || - !modelData.allowed_roles.includes('main') - ) { - console.error( - chalk.red(`Error: Model "${modelId}" is not allowed for the 'main' role.`) - ); - // Try to suggest valid models for the role - const allowedMainModels = allModels - .filter((m) => m.allowed_roles?.includes('main')) - .map((m) => ` - ${m.provider} / ${m.id}`) - .join('\n'); - if (allowedMainModels) { - console.log( - chalk.yellow('\nAllowed models for main role:\n' + allowedMainModels) - ); - } - return false; - } - - // --- 3. Validate Model Combination (Optional Warning) --- - if (!validateProviderModelCombination(providerName, modelId)) { - console.warn( - chalk.yellow( - `Warning: Model "${modelId}" is not in the known list for provider "${providerName}". Ensure it is valid.` - ) - ); - } - - // --- Proceed with setting --- - const config = readConfig(explicitRoot); - config.models.main = { provider: providerName, modelId: modelId }; - // Pass explicitRoot down - if (writeConfig(config, explicitRoot)) { - console.log( - chalk.green(`Main AI model set to: ${providerName} / ${modelId}`) - ); - return true; - } else { - return false; - } -} - -/** - * Sets the research AI model (provider and modelId) in the configuration file. - * @param {string} providerName The name of the provider to set. - * @param {string} modelId The ID of the model to set. - * @param {string|null} explicitRoot - Optional explicit path to the project root. - * @returns {boolean} True if successful, false otherwise. - */ -function setResearchModel(providerName, modelId, explicitRoot = null) { - // --- 1. Validate Provider First --- - if (!validateProvider(providerName)) { - console.error( - chalk.red(`Error: "${providerName}" is not a valid provider.`) - ); - console.log( - chalk.yellow(`Available providers: ${VALID_PROVIDERS.join(', ')}`) - ); - return false; - } - - // --- 2. 
Validate Role Second --- - const allModels = getAvailableModels(); // Get all models to check roles - const modelData = allModels.find( - (m) => m.id === modelId && m.provider === providerName - ); - - if ( - !modelData || - !modelData.allowed_roles || - !modelData.allowed_roles.includes('research') - ) { - console.error( - chalk.red( - `Error: Model "${modelId}" is not allowed for the 'research' role.` - ) - ); - // Try to suggest valid models for the role - const allowedResearchModels = allModels - .filter((m) => m.allowed_roles?.includes('research')) - .map((m) => ` - ${m.provider} / ${m.id}`) - .join('\n'); - if (allowedResearchModels) { - console.log( - chalk.yellow( - '\nAllowed models for research role:\n' + allowedResearchModels - ) - ); - } - return false; - } - - // --- 3. Validate Model Combination (Optional Warning) --- - if (!validateProviderModelCombination(providerName, modelId)) { - console.warn( - chalk.yellow( - `Warning: Model "${modelId}" is not in the known list for provider "${providerName}". Ensure it is valid.` - ) - ); - } - - // --- 4. Specific Research Warning (Optional) --- - if ( - providerName === 'anthropic' || - (providerName === 'openai' && modelId.includes('3.5')) - ) { - console.warn( - chalk.yellow( - `Warning: Provider "${providerName}" with model "${modelId}" may not be ideal for research tasks. Perplexity or Grok recommended.` - ) - ); - } - - // --- Proceed with setting --- - const config = readConfig(explicitRoot); - config.models.research = { provider: providerName, modelId: modelId }; - // Pass explicitRoot down - if (writeConfig(config, explicitRoot)) { - console.log( - chalk.green(`Research AI model set to: ${providerName} / ${modelId}`) - ); - return true; - } else { - return false; - } -} - -/** - * Sets the fallback AI model (provider and modelId) in the configuration file. - * @param {string} providerName The name of the provider to set. - * @param {string} modelId The ID of the model to set. - * @param {string|null} explicitRoot - Optional explicit path to the project root. - * @returns {boolean} True if successful, false otherwise. - */ -function setFallbackModel(providerName, modelId, explicitRoot = null) { - // --- 1. Validate Provider First --- - if (!validateProvider(providerName)) { - console.error( - chalk.red(`Error: "${providerName}" is not a valid provider.`) - ); - console.log( - chalk.yellow(`Available providers: ${VALID_PROVIDERS.join(', ')}`) - ); - return false; - } - - // --- 2. Validate Role Second --- - const allModels = getAvailableModels(); // Get all models to check roles - const modelData = allModels.find( - (m) => m.id === modelId && m.provider === providerName - ); - - if ( - !modelData || - !modelData.allowed_roles || - !modelData.allowed_roles.includes('fallback') - ) { - console.error( - chalk.red( - `Error: Model "${modelId}" is not allowed for the 'fallback' role.` - ) - ); - // Try to suggest valid models for the role - const allowedFallbackModels = allModels - .filter((m) => m.allowed_roles?.includes('fallback')) - .map((m) => ` - ${m.provider} / ${m.id}`) - .join('\n'); - if (allowedFallbackModels) { - console.log( - chalk.yellow( - '\nAllowed models for fallback role:\n' + allowedFallbackModels - ) - ); - } - return false; - } - - // --- 3. Validate Model Combination (Optional Warning) --- - if (!validateProviderModelCombination(providerName, modelId)) { - console.warn( - chalk.yellow( - `Warning: Model "${modelId}" is not in the known list for provider "${providerName}". 
Ensure it is valid.` - ) - ); - } - - // --- Proceed with setting --- - const config = readConfig(explicitRoot); - if (!config.models) { - config.models = {}; // Ensure models object exists - } - // Ensure fallback object exists - if (!config.models.fallback) { - config.models.fallback = {}; - } - - config.models.fallback = { provider: providerName, modelId: modelId }; - - return writeConfig(config, explicitRoot); -} - -/** - * Gets a list of available models based on the MODEL_MAP. - * @returns {Array<{id: string, name: string, provider: string, swe_score: number|null, cost_per_1m_tokens: {input: number|null, output: number|null}|null, allowed_roles: string[]}>} - */ -function getAvailableModels() { - const available = []; - for (const [provider, models] of Object.entries(MODEL_MAP)) { - if (models.length > 0) { - models.forEach((modelObj) => { - // Basic name generation - can be improved - const modelId = modelObj.id; - const sweScore = modelObj.swe_score; - const cost = modelObj.cost_per_1m_tokens; - const allowedRoles = modelObj.allowed_roles || ['main', 'fallback']; - const nameParts = modelId - .split('-') - .map((p) => p.charAt(0).toUpperCase() + p.slice(1)); - // Handle specific known names better if needed - let name = nameParts.join(' '); - if (modelId === 'claude-3.5-sonnet-20240620') - name = 'Claude 3.5 Sonnet'; - if (modelId === 'claude-3-7-sonnet-20250219') - name = 'Claude 3.7 Sonnet'; - if (modelId === 'gpt-4o') name = 'GPT-4o'; - if (modelId === 'gpt-4-turbo') name = 'GPT-4 Turbo'; - if (modelId === 'sonar-pro') name = 'Perplexity Sonar Pro'; - if (modelId === 'sonar-mini') name = 'Perplexity Sonar Mini'; - - available.push({ - id: modelId, - name: name, - provider: provider, - swe_score: sweScore, - cost_per_1m_tokens: cost, - allowed_roles: allowedRoles - }); - }); - } else { - // For providers with empty lists (like ollama), maybe add a placeholder or skip - available.push({ - id: `[${provider}-any]`, - name: `Any (${provider})`, - provider: provider - }); - } - } - return available; -} - -/** - * Writes the configuration object to the file. - * @param {Object} config The configuration object to write. - * @param {string|null} explicitRoot - Optional explicit path to the project root. - * @returns {boolean} True if successful, false otherwise. - */ -function writeConfig(config, explicitRoot = null) { - const rootPath = explicitRoot || findProjectRoot(); - if (!rootPath) { - console.error( - chalk.red( - 'Error: Could not determine project root. Configuration not saved.' - ) - ); - return false; - } - // Ensure we don't double-join if explicitRoot already contains the filename - const configPath = - path.basename(rootPath) === CONFIG_FILE_NAME - ? rootPath - : path.join(rootPath, CONFIG_FILE_NAME); - - try { - fs.writeFileSync(configPath, JSON.stringify(config, null, 2)); - return true; - } catch (error) { - console.error( - chalk.red( - `Error writing configuration to ${configPath}: ${error.message}` - ) - ); - return false; - } -} - -/** - * Checks if the required API key environment variable is set for a given provider. - * @param {string} providerName The name of the provider. - * @returns {boolean} True if the API key environment variable exists and is non-empty, false otherwise. 
- */ -function hasApiKeyForProvider(providerName) { - switch (providerName) { - case 'anthropic': - return !!process.env.ANTHROPIC_API_KEY; - case 'openai': - case 'openrouter': // OpenRouter uses OpenAI-compatible key - return !!process.env.OPENAI_API_KEY; - case 'google': - return !!process.env.GOOGLE_API_KEY; - case 'perplexity': - return !!process.env.PERPLEXITY_API_KEY; - case 'grok': - case 'xai': // Added alias for Grok - return !!process.env.GROK_API_KEY; - case 'ollama': - return true; // Ollama runs locally, no cloud API key needed - default: - return false; // Unknown provider cannot have a key checked - } + const envVarName = keyMap[providerKey]; + // Use resolveEnvVariable to check both process.env and session.env + return !!resolveEnvVariable(envVarName, session); } /** @@ -685,24 +414,125 @@ function getMcpApiKeyStatus(providerName) { } } +/** + * Gets a list of available models based on the MODEL_MAP. + * @returns {Array<{id: string, name: string, provider: string, swe_score: number|null, cost_per_1m_tokens: {input: number|null, output: number|null}|null, allowed_roles: string[]}>} + */ +function getAvailableModels() { + const available = []; + for (const [provider, models] of Object.entries(MODEL_MAP)) { + if (models.length > 0) { + models.forEach((modelObj) => { + // Basic name generation - can be improved + const modelId = modelObj.id; + const sweScore = modelObj.swe_score; + const cost = modelObj.cost_per_1m_tokens; + const allowedRoles = modelObj.allowed_roles || ['main', 'fallback']; + const nameParts = modelId + .split('-') + .map((p) => p.charAt(0).toUpperCase() + p.slice(1)); + // Handle specific known names better if needed + let name = nameParts.join(' '); + if (modelId === 'claude-3.5-sonnet-20240620') + name = 'Claude 3.5 Sonnet'; + if (modelId === 'claude-3-7-sonnet-20250219') + name = 'Claude 3.7 Sonnet'; + if (modelId === 'gpt-4o') name = 'GPT-4o'; + if (modelId === 'gpt-4-turbo') name = 'GPT-4 Turbo'; + if (modelId === 'sonar-pro') name = 'Perplexity Sonar Pro'; + if (modelId === 'sonar-mini') name = 'Perplexity Sonar Mini'; + + available.push({ + id: modelId, + name: name, + provider: provider, + swe_score: sweScore, + cost_per_1m_tokens: cost, + allowed_roles: allowedRoles + }); + }); + } else { + // For providers with empty lists (like ollama), maybe add a placeholder or skip + available.push({ + id: `[${provider}-any]`, + name: `Any (${provider})`, + provider: provider + }); + } + } + return available; +} + +/** + * Writes the configuration object to the file. + * @param {Object} config The configuration object to write. + * @param {string|null} explicitRoot - Optional explicit path to the project root. + * @returns {boolean} True if successful, false otherwise. + */ +function writeConfig(config, explicitRoot = null) { + const rootPath = explicitRoot || findProjectRoot(); + if (!rootPath) { + console.error( + chalk.red( + 'Error: Could not determine project root. Configuration not saved.' + ) + ); + return false; + } + const configPath = + path.basename(rootPath) === CONFIG_FILE_NAME + ? 
rootPath + : path.join(rootPath, CONFIG_FILE_NAME); + + try { + fs.writeFileSync(configPath, JSON.stringify(config, null, 2)); + loadedConfig = config; // Update the cache after successful write + return true; + } catch (error) { + console.error( + chalk.red( + `Error writing configuration to ${configPath}: ${error.message}` + ) + ); + return false; + } +} + export { - // Not exporting findProjectRoot as it's internal for CLI context now - readConfig, // Keep exporting if direct access is needed elsewhere - writeConfig, // Keep exporting if direct access is needed elsewhere + // Core config access + getConfig, // Might still be useful for getting the whole object + writeConfig, + + // Validation validateProvider, validateProviderModelCombination, - getMainProvider, - getMainModelId, - getResearchProvider, - getResearchModelId, - getFallbackProvider, - getFallbackModelId, - setMainModel, - setResearchModel, - setFallbackModel, VALID_PROVIDERS, MODEL_MAP, getAvailableModels, - hasApiKeyForProvider, + + // Role-specific getters + getMainProvider, + getMainModelId, + getMainMaxTokens, + getMainTemperature, + getResearchProvider, + getResearchModelId, + getResearchMaxTokens, + getResearchTemperature, + getFallbackProvider, + getFallbackModelId, + getFallbackMaxTokens, + getFallbackTemperature, + + // Global setting getters + getLogLevel, + getDebugFlag, + getDefaultSubtasks, + getDefaultPriority, + getProjectName, + getOllamaBaseUrl, + + // API Key Checkers (still relevant) + isApiKeySet, getMcpApiKeyStatus }; diff --git a/scripts/modules/task-manager.js b/scripts/modules/task-manager.js index 257954a1..6c0ceacb 100644 --- a/scripts/modules/task-manager.js +++ b/scripts/modules/task-manager.js @@ -14,7 +14,6 @@ import ora from 'ora'; import inquirer from 'inquirer'; import { - CONFIG, log, readJSON, writeJSON, @@ -86,6 +85,14 @@ try { log('warn', 'Research-backed features will not be available'); } +// Import necessary config getters +import { + getDebugFlag, + getDefaultSubtasks, + getDefaultPriority + // Add other getters here as needed later +} from './config-manager.js'; + /** * Parse a PRD file and generate tasks * @param {string} prdPath - Path to the PRD file @@ -196,7 +203,8 @@ async function parsePRD( if (outputFormat === 'text') { console.error(chalk.red(`Error: ${error.message}`)); - if (CONFIG.debug) { + if (getDebugFlag()) { + // Use getter console.error(error); } @@ -675,7 +683,8 @@ Return only the updated task as a valid JSON object.` console.log(' 2. Ensure PERPLEXITY_API_KEY is set for fallback.'); } - if (CONFIG.debug) { + if (getDebugFlag()) { + // Use getter console.error(error); } @@ -1337,7 +1346,8 @@ Return only the updated task as a valid JSON object.` console.log(' 2. 
Use a valid task ID with the --id parameter'); } - if (CONFIG.debug) { + if (getDebugFlag()) { + // Use getter console.error(error); } } else { @@ -1484,7 +1494,8 @@ function generateTaskFiles(tasksPath, outputDir, options = {}) { if (!options?.mcpLog) { console.error(chalk.red(`Error generating task files: ${error.message}`)); - if (CONFIG.debug) { + if (getDebugFlag()) { + // Use getter console.error(error); } @@ -1584,7 +1595,8 @@ async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) { if (!options?.mcpLog) { console.error(chalk.red(`Error: ${error.message}`)); - if (CONFIG.debug) { + if (getDebugFlag()) { + // Use getter console.error(error); } @@ -2477,7 +2489,7 @@ async function expandTask( } // Determine the number of subtasks to generate - let subtaskCount = parseInt(numSubtasks, 10) || CONFIG.defaultSubtasks; + let subtaskCount = parseInt(numSubtasks, 10) || getDefaultSubtasks(); // Use getter // Check if we have a complexity analysis for this task let taskAnalysis = null; @@ -2504,7 +2516,7 @@ async function expandTask( // Use recommended number of subtasks if available if ( taskAnalysis.recommendedSubtasks && - subtaskCount === CONFIG.defaultSubtasks + subtaskCount === getDefaultSubtasks() // Use getter ) { subtaskCount = taskAnalysis.recommendedSubtasks; report(`Using recommended number of subtasks: ${subtaskCount}`); @@ -2672,7 +2684,7 @@ Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use */ async function expandAllTasks( tasksPath, - numSubtasks = CONFIG.defaultSubtasks, + numSubtasks = getDefaultSubtasks(), // Use getter useResearch = false, additionalContext = '', forceFlag = false, @@ -2698,7 +2710,7 @@ async function expandAllTasks( if (typeof numSubtasks === 'string') { numSubtasks = parseInt(numSubtasks, 10); if (isNaN(numSubtasks)) { - numSubtasks = CONFIG.defaultSubtasks; + numSubtasks = getDefaultSubtasks(); // Use getter } } @@ -3127,7 +3139,7 @@ async function addTask( tasksPath, prompt, dependencies = [], - priority = 'medium', + priority = getDefaultPriority(), // Use getter { reportProgress, mcpLog, session } = {}, outputFormat = 'text', customEnv = null, @@ -4415,7 +4427,8 @@ DO NOT include any text before or after the JSON array. No explanations, no mark console.error( chalk.red(`Error parsing complexity analysis: ${error.message}`) ); - if (CONFIG.debug) { + if (getDebugFlag()) { + // Use getter console.debug( chalk.gray(`Raw response: ${fullResponse.substring(0, 500)}...`) ); @@ -4460,7 +4473,8 @@ DO NOT include any text before or after the JSON array. No explanations, no mark ); } - if (CONFIG.debug) { + if (getDebugFlag()) { + // Use getter console.error(error); } @@ -5382,7 +5396,8 @@ Provide concrete examples, code snippets, or implementation details when relevan ); } - if (CONFIG.debug) { + if (getDebugFlag()) { + // Use getter console.error(error); } } else { diff --git a/scripts/modules/task-manager.js (lines 3036-3084) b/scripts/modules/task-manager.js (lines 3036-3084) deleted file mode 100644 index b9b90bb2..00000000 --- a/scripts/modules/task-manager.js (lines 3036-3084) +++ /dev/null @@ -1,32 +0,0 @@ -async function updateSubtaskById(tasksPath, subtaskId, prompt, useResearch = false) { - let loadingIndicator = null; - try { - log('info', `Updating subtask ${subtaskId} with prompt: "${prompt}"`); - - // Validate subtask ID format - if (!subtaskId || typeof subtaskId !== 'string' || !subtaskId.includes('.')) { - throw new Error(`Invalid subtask ID format: ${subtaskId}. 
Subtask ID must be in format "parentId.subtaskId"`); - } - - // Validate prompt - if (!prompt || typeof prompt !== 'string' || prompt.trim() === '') { - throw new Error('Prompt cannot be empty. Please provide context for the subtask update.'); - } - - // Prepare for fallback handling - let claudeOverloaded = false; - - // Validate tasks file exists - if (!fs.existsSync(tasksPath)) { - throw new Error(`Tasks file not found at path: ${tasksPath}`); - } - - // Read the tasks file - const data = readJSON(tasksPath); - // ... rest of the function - } catch (error) { - // Handle errors - console.error(`Error updating subtask: ${error.message}`); - throw error; - } -} \ No newline at end of file diff --git a/scripts/modules/ui.js b/scripts/modules/ui.js index cca71055..e80ede1e 100644 --- a/scripts/modules/ui.js +++ b/scripts/modules/ui.js @@ -10,7 +10,6 @@ import ora from 'ora'; import Table from 'cli-table3'; import gradient from 'gradient-string'; import { - CONFIG, log, findTaskById, readJSON, @@ -20,6 +19,7 @@ import { import path from 'path'; import fs from 'fs'; import { findNextTask, analyzeTaskComplexity } from './task-manager.js'; +import { getProjectName, getDefaultSubtasks } from './config-manager.js'; // Create a color gradient for the banner const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']); @@ -44,7 +44,7 @@ function displayBanner() { ); // Read version directly from package.json - let version = CONFIG.projectVersion; // Default fallback + let version = 'unknown'; // Initialize with a default try { const packageJsonPath = path.join(process.cwd(), 'package.json'); if (fs.existsSync(packageJsonPath)) { @@ -53,12 +53,13 @@ function displayBanner() { } } catch (error) { // Silently fall back to default version + log('warn', 'Could not read package.json for version info.'); } console.log( boxen( chalk.white( - `${chalk.bold('Version:')} ${version} ${chalk.bold('Project:')} ${CONFIG.projectName}` + `${chalk.bold('Version:')} ${version} ${chalk.bold('Project:')} ${getProjectName(null)}` ), { padding: 1, @@ -1652,6 +1653,45 @@ async function displayComplexityReport(reportPath) { ); } +/** + * Generate a prompt for complexity analysis + * @param {Object} tasksData - Tasks data object containing tasks array + * @returns {string} Generated prompt + */ +function generateComplexityAnalysisPrompt(tasksData) { + const defaultSubtasks = getDefaultSubtasks(null); // Use the getter + return `Analyze the complexity of the following tasks and provide recommendations for subtask breakdown: + +${tasksData.tasks + .map( + (task) => ` +Task ID: ${task.id} +Title: ${task.title} +Description: ${task.description} +Details: ${task.details} +Dependencies: ${JSON.stringify(task.dependencies || [])} +Priority: ${task.priority || 'medium'} +` + ) + .join('\n---\n')} + +Analyze each task and return a JSON array with the following structure for each task: +[ + { + "taskId": number, + "taskTitle": string, + "complexityScore": number (1-10), + "recommendedSubtasks": number (${Math.max(3, defaultSubtasks - 1)}-${Math.min(8, defaultSubtasks + 2)}), + "expansionPrompt": string (a specific prompt for generating good subtasks), + "reasoning": string (brief explanation of your assessment) + }, + ... +] + +IMPORTANT: Make sure to include an analysis for EVERY task listed above, with the correct taskId matching each task's ID. 
+`; +} + /** * Confirm overwriting existing tasks.json file * @param {string} tasksPath - Path to the tasks.json file @@ -1706,5 +1746,6 @@ export { displayNextTask, displayTaskById, displayComplexityReport, + generateComplexityAnalysisPrompt, confirmTaskOverwrite }; diff --git a/scripts/modules/utils.js b/scripts/modules/utils.js index ee14cc9d..8f738c46 100644 --- a/scripts/modules/utils.js +++ b/scripts/modules/utils.js @@ -6,22 +6,61 @@ import fs from 'fs'; import path from 'path'; import chalk from 'chalk'; +import { ZodError } from 'zod'; +// Import specific config getters needed here +import { getLogLevel, getDebugFlag } from './config-manager.js'; // Global silent mode flag let silentMode = false; -// Configuration and constants -const CONFIG = { - model: process.env.MODEL || 'claude-3-7-sonnet-20250219', - maxTokens: parseInt(process.env.MAX_TOKENS || '4000'), - temperature: parseFloat(process.env.TEMPERATURE || '0.7'), - debug: process.env.DEBUG === 'true', - logLevel: process.env.LOG_LEVEL || 'info', - defaultSubtasks: parseInt(process.env.DEFAULT_SUBTASKS || '3'), - defaultPriority: process.env.DEFAULT_PRIORITY || 'medium', - projectName: process.env.PROJECT_NAME || 'Task Master', - projectVersion: '1.5.0' // Hardcoded version - ALWAYS use this value, ignore environment variable -}; +// --- Environment Variable Resolution Utility --- +/** + * Resolves an environment variable by checking process.env first, then session.env. + * @param {string} varName - The name of the environment variable. + * @param {string|null} session - The MCP session object (optional). + * @returns {string|undefined} The value of the environment variable or undefined if not found. + */ +function resolveEnvVariable(varName, session) { + // Ensure session and session.env exist before attempting access + const sessionValue = + session && session.env ? session.env[varName] : undefined; + return process.env[varName] ?? sessionValue; +} + +// --- Project Root Finding Utility --- +/** + * Finds the project root directory by searching upwards from a given starting point + * for a marker file or directory (e.g., 'package.json', '.git'). + * @param {string} [startPath=process.cwd()] - The directory to start searching from. + * @param {string[]} [markers=['package.json', '.git', '.taskmasterconfig']] - Marker files/dirs to look for. + * @returns {string|null} The path to the project root directory, or null if not found. + */ +function findProjectRoot( + startPath = process.cwd(), + markers = ['package.json', '.git', '.taskmasterconfig'] +) { + let currentPath = path.resolve(startPath); + while (true) { + for (const marker of markers) { + if (fs.existsSync(path.join(currentPath, marker))) { + return currentPath; + } + } + const parentPath = path.dirname(currentPath); + if (parentPath === currentPath) { + // Reached the filesystem root + return null; + } + currentPath = parentPath; + } +} + +// --- Dynamic Configuration Function --- (REMOVED) +/* +function getConfig(session = null) { + // ... implementation removed ... +} +*/ // Set up logging based on log level const LOG_LEVELS = { @@ -73,6 +112,9 @@ function log(level, ...args) { return; } + // Get log level dynamically from config-manager + const configLevel = getLogLevel() || 'info'; // Use getter + // Use text prefixes instead of emojis const prefixes = { debug: chalk.gray('[DEBUG]'), @@ -84,7 +126,6 @@ function log(level, ...args) { // Ensure level exists, default to info if not const currentLevel = LOG_LEVELS.hasOwnProperty(level) ? 
level : 'info'; - const configLevel = CONFIG.logLevel || 'info'; // Ensure configLevel has a default // Check log level configuration if ( @@ -106,12 +147,15 @@ function log(level, ...args) { * @returns {Object|null} Parsed JSON data or null if error occurs */ function readJSON(filepath) { + // Get debug flag dynamically from config-manager + const isDebug = getDebugFlag(); try { const rawData = fs.readFileSync(filepath, 'utf8'); return JSON.parse(rawData); } catch (error) { log('error', `Error reading JSON file ${filepath}:`, error.message); - if (CONFIG.debug) { + if (isDebug) { + // Use dynamic debug flag // Use log utility for debug output too log('error', 'Full error details:', error); } @@ -125,6 +169,8 @@ function readJSON(filepath) { * @param {Object} data - Data to write */ function writeJSON(filepath, data) { + // Get debug flag dynamically from config-manager + const isDebug = getDebugFlag(); try { const dir = path.dirname(filepath); if (!fs.existsSync(dir)) { @@ -133,7 +179,8 @@ function writeJSON(filepath, data) { fs.writeFileSync(filepath, JSON.stringify(data, null, 2), 'utf8'); } catch (error) { log('error', `Error writing JSON file ${filepath}:`, error.message); - if (CONFIG.debug) { + if (isDebug) { + // Use dynamic debug flag // Use log utility for debug output too log('error', 'Full error details:', error); } @@ -156,6 +203,8 @@ function sanitizePrompt(prompt) { * @returns {Object|null} The parsed complexity report or null if not found */ function readComplexityReport(customPath = null) { + // Get debug flag dynamically from config-manager + const isDebug = getDebugFlag(); try { const reportPath = customPath || @@ -168,6 +217,11 @@ function readComplexityReport(customPath = null) { return JSON.parse(reportData); } catch (error) { log('warn', `Could not read complexity report: ${error.message}`); + // Optionally log full error in debug mode + if (isDebug) { + // Use dynamic debug flag + log('error', 'Full error details:', error); + } return null; } } @@ -399,7 +453,8 @@ function detectCamelCaseFlags(args) { // Export all utility functions and configuration export { - CONFIG, + // CONFIG, <-- Already Removed + // getConfig <-- Removing now LOG_LEVELS, log, readJSON, @@ -417,5 +472,8 @@ export { enableSilentMode, disableSilentMode, isSilentMode, - getTaskManager + resolveEnvVariable, + getTaskManager, + findProjectRoot + // getConfig <-- Removed }; diff --git a/src/ai-providers/anthropic.js b/src/ai-providers/anthropic.js new file mode 100644 index 00000000..8bdf2d82 --- /dev/null +++ b/src/ai-providers/anthropic.js @@ -0,0 +1,191 @@ +/** + * src/ai-providers/anthropic.js + * + * Implementation for interacting with Anthropic models (e.g., Claude) + * using the Vercel AI SDK. + */ +import { createAnthropic } from '@ai-sdk/anthropic'; +import { generateText, streamText, generateObject, streamObject } from 'ai'; +import { log } from '../../scripts/modules/utils.js'; // Assuming utils is accessible + +// TODO: Implement standardized functions for generateText, streamText, generateObject + +// --- Client Instantiation --- +// Note: API key resolution should ideally happen closer to the call site +// using the config manager/resolver which checks process.env and session.env. +// This is a placeholder for basic functionality. +// Remove the global variable and caching logic +// let anthropicClient; + +function getClient(apiKey) { + if (!apiKey) { + // In a real scenario, this would use the config resolver. + // Throwing error here if key isn't passed for simplicity. 
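+		// For example (illustrative sketch): a caller could resolve the key first
+		// with the resolveEnvVariable helper from scripts/modules/utils.js, e.g.
+		//   const apiKey = resolveEnvVariable('ANTHROPIC_API_KEY', session);
+		// and then pass it into getClient(apiKey).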
+		// Keep the error check for the passed key
+		throw new Error('Anthropic API key is required.');
+	}
+	// Remove the check for anthropicClient
+	// if (!anthropicClient) {
+	// TODO: Explore passing options like default headers if needed
+	// Create and return a new instance directly
+	return createAnthropic({
+		apiKey: apiKey
+	});
+	// }
+	// return anthropicClient;
+}
+
+// --- Standardized Service Function Implementations ---
+
+/**
+ * Generates text using an Anthropic model.
+ *
+ * @param {object} params - Parameters for the text generation.
+ * @param {string} params.apiKey - The Anthropic API key.
+ * @param {string} params.modelId - The specific Anthropic model ID to use (e.g., 'claude-3-haiku-20240307').
+ * @param {string} params.systemPrompt - The system prompt.
+ * @param {string} params.userPrompt - The user prompt.
+ * @param {number} [params.maxTokens] - Maximum tokens for the response.
+ * @param {number} [params.temperature] - Temperature for generation.
+ * @returns {Promise<string>} The generated text content.
+ * @throws {Error} If the API call fails.
+ */
+export async function generateAnthropicText({
+	apiKey,
+	modelId,
+	systemPrompt,
+	userPrompt,
+	maxTokens,
+	temperature
+}) {
+	log('debug', `Generating Anthropic text with model: ${modelId}`);
+	try {
+		const client = getClient(apiKey);
+		const result = await generateText({
+			model: client(modelId), // Pass the model ID to the client instance
+			system: systemPrompt,
+			prompt: userPrompt,
+			maxTokens: maxTokens,
+			temperature: temperature
+			// TODO: Add other relevant parameters like topP, topK if needed
+		});
+		log(
+			'debug',
+			`Anthropic generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
+		);
+		return result.text;
+	} catch (error) {
+		log('error', `Anthropic generateText failed: ${error.message}`);
+		// Consider more specific error handling or re-throwing a standardized error
+		throw error;
+	}
+}
+
+/**
+ * Streams text using an Anthropic model.
+ *
+ * @param {object} params - Parameters for the text streaming.
+ * @param {string} params.apiKey - The Anthropic API key.
+ * @param {string} params.modelId - The specific Anthropic model ID.
+ * @param {string} params.systemPrompt - The system prompt.
+ * @param {string} params.userPrompt - The user prompt.
+ * @param {number} [params.maxTokens] - Maximum tokens for the response.
+ * @param {number} [params.temperature] - Temperature for generation.
+ * @returns {Promise<ReadableStream<string>>} A readable stream of text deltas.
+ * @throws {Error} If the API call fails to initiate the stream.
+ */
+export async function streamAnthropicText({
+	apiKey,
+	modelId,
+	systemPrompt,
+	userPrompt,
+	maxTokens,
+	temperature
+}) {
+	log('debug', `Streaming Anthropic text with model: ${modelId}`);
+	try {
+		const client = getClient(apiKey);
+		const stream = await streamText({
+			model: client(modelId),
+			system: systemPrompt,
+			prompt: userPrompt,
+			maxTokens: maxTokens,
+			temperature: temperature
+			// TODO: Add other relevant parameters
+		});
+
+		// We return the stream directly. The consumer will handle reading it.
+		// We could potentially wrap it or add logging within the stream pipe if needed.
+		return stream.textStream;
+	} catch (error) {
+		log('error', `Anthropic streamText failed: ${error.message}`);
+		throw error;
+	}
+}
+
+/**
+ * Generates a structured object using an Anthropic model.
+ * NOTE: Anthropic's tool/function calling support might have limitations
+ * compared to OpenAI, especially regarding complex schemas or enforcement.
+ * The Vercel AI SDK attempts to abstract this.
+ *
+ * @param {object} params - Parameters for object generation.
+ * @param {string} params.apiKey - The Anthropic API key.
+ * @param {string} params.modelId - The specific Anthropic model ID.
+ * @param {string} params.systemPrompt - The system prompt (optional).
+ * @param {string} params.userPrompt - The user prompt describing the desired object.
+ * @param {import('zod').ZodSchema} params.schema - The Zod schema for the object.
+ * @param {string} params.objectName - A name for the object/tool.
+ * @param {number} [params.maxTokens] - Maximum tokens for the response.
+ * @param {number} [params.temperature] - Temperature for generation.
+ * @param {number} [params.maxRetries] - Max retries for validation/generation.
+ * @returns {Promise<object>} The generated object matching the schema.
+ * @throws {Error} If generation or validation fails.
+ */
+export async function generateAnthropicObject({
+	apiKey,
+	modelId,
+	systemPrompt,
+	userPrompt,
+	schema,
+	objectName = 'generated_object', // Provide a default name
+	maxTokens,
+	temperature,
+	maxRetries = 3
+}) {
+	log(
+		'debug',
+		`Generating Anthropic object ('${objectName}') with model: ${modelId}`
+	);
+	try {
+		const client = getClient(apiKey);
+		const result = await generateObject({
+			model: client(modelId),
+			mode: 'tool', // Anthropic generally uses 'tool' mode for structured output
+			schema: schema,
+			system: systemPrompt,
+			prompt: userPrompt,
+			tool: {
+				name: objectName, // Use the provided or default name
+				description: `Generate a ${objectName} based on the prompt.` // Simple description
+			},
+			maxTokens: maxTokens,
+			temperature: temperature,
+			maxRetries: maxRetries
+		});
+		log(
+			'debug',
+			`Anthropic generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
+		);
+		return result.object;
+	} catch (error) {
+		log(
+			'error',
+			`Anthropic generateObject ('${objectName}') failed: ${error.message}`
+		);
+		throw error;
+	}
+}
+
+// TODO: Implement streamAnthropicObject if needed and supported well by the SDK for Anthropic.
+// The basic structure would be similar to generateAnthropicObject but using streamObject.
diff --git a/src/ai-providers/perplexity.js b/src/ai-providers/perplexity.js
new file mode 100644
index 00000000..4fad6c32
--- /dev/null
+++ b/src/ai-providers/perplexity.js
@@ -0,0 +1,176 @@
+/**
+ * src/ai-providers/perplexity.js
+ *
+ * Implementation for interacting with Perplexity models
+ * using the Vercel AI SDK.
+ */
+import { createPerplexity } from '@ai-sdk/perplexity';
+import { generateText, streamText, generateObject, streamObject } from 'ai';
+import { log } from '../../scripts/modules/utils.js';
+
+// --- Client Instantiation ---
+// Similar to Anthropic, this expects the resolved API key to be passed in.
+function getClient(apiKey) {
+	if (!apiKey) {
+		throw new Error('Perplexity API key is required.');
+	}
+	// Create and return a new instance directly
+	return createPerplexity({
+		apiKey: apiKey
+	});
+}
+
+// --- Standardized Service Function Implementations ---
+
+/**
+ * Generates text using a Perplexity model.
+ *
+ * @param {object} params - Parameters for text generation.
+ * @param {string} params.apiKey - The Perplexity API key.
+ * @param {string} params.modelId - The Perplexity model ID (e.g., 'sonar-small-32k-online').
+ * @param {string} [params.systemPrompt] - The system prompt (optional for some models).
+ * @param {string} params.userPrompt - The user prompt.
+ * @param {number} [params.maxTokens] - Maximum tokens.
+ * @param {number} [params.temperature] - Temperature.
+ * @returns {Promise<string>} Generated text.
+ */
+export async function generatePerplexityText({
+	apiKey,
+	modelId,
+	systemPrompt,
+	userPrompt,
+	maxTokens,
+	temperature
+}) {
+	log('debug', `Generating Perplexity text with model: ${modelId}`);
+	try {
+		const client = getClient(apiKey);
+		const result = await generateText({
+			model: client(modelId),
+			system: systemPrompt, // Pass system prompt if provided
+			prompt: userPrompt,
+			maxTokens: maxTokens,
+			temperature: temperature
+		});
+		log(
+			'debug',
+			`Perplexity generateText result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}`
+		);
+		return result.text;
+	} catch (error) {
+		log('error', `Perplexity generateText failed: ${error.message}`);
+		throw error;
+	}
+}
+
+/**
+ * Streams text using a Perplexity model.
+ *
+ * @param {object} params - Parameters for text streaming.
+ * @param {string} params.apiKey - The Perplexity API key.
+ * @param {string} params.modelId - The Perplexity model ID.
+ * @param {string} [params.systemPrompt] - The system prompt.
+ * @param {string} params.userPrompt - The user prompt.
+ * @param {number} [params.maxTokens] - Maximum tokens.
+ * @param {number} [params.temperature] - Temperature.
+ * @returns {Promise<ReadableStream<string>>} Stream of text deltas.
+ */
+export async function streamPerplexityText({
+	apiKey,
+	modelId,
+	systemPrompt,
+	userPrompt,
+	maxTokens,
+	temperature
+}) {
+	log('debug', `Streaming Perplexity text with model: ${modelId}`);
+	try {
+		const client = getClient(apiKey);
+		const stream = await streamText({
+			model: client(modelId),
+			system: systemPrompt,
+			prompt: userPrompt,
+			maxTokens: maxTokens,
+			temperature: temperature
+		});
+		return stream.textStream;
+	} catch (error) {
+		log('error', `Perplexity streamText failed: ${error.message}`);
+		throw error;
+	}
+}
+
+/**
+ * Generates a structured object using a Perplexity model.
+ * Note: Perplexity's support for structured output/tool use might vary.
+ * We assume it follows OpenAI's function/tool calling conventions if supported by the SDK.
+ *
+ * @param {object} params - Parameters for object generation.
+ * @param {string} params.apiKey - The Perplexity API key.
+ * @param {string} params.modelId - The Perplexity model ID.
+ * @param {string} [params.systemPrompt] - System prompt.
+ * @param {string} params.userPrompt - User prompt.
+ * @param {import('zod').ZodSchema} params.schema - Zod schema.
+ * @param {string} params.objectName - Name for the object/tool.
+ * @param {number} [params.maxTokens] - Maximum tokens.
+ * @param {number} [params.temperature] - Temperature.
+ * @param {number} [params.maxRetries] - Max retries.
+ * @returns {Promise<object>} Generated object.
+ */ +export async function generatePerplexityObject({ + apiKey, + modelId, + systemPrompt, + userPrompt, + schema, + objectName = 'generated_object', + maxTokens, + temperature, + maxRetries = 3 +}) { + log( + 'debug', + `Generating Perplexity object ('${objectName}') with model: ${modelId}` + ); + try { + const client = getClient(apiKey); + // Assuming Perplexity follows OpenAI-like tool mode if supported by SDK + const result = await generateObject({ + model: client(modelId), + mode: 'tool', + schema: schema, + system: systemPrompt, + prompt: userPrompt, + tool: { + name: objectName, + description: `Generate a ${objectName} based on the prompt.` + }, + maxTokens: maxTokens, + temperature: temperature, + maxRetries: maxRetries + }); + log( + 'debug', + `Perplexity generateObject result received. Tokens: ${result.usage.completionTokens}/${result.usage.promptTokens}` + ); + return result.object; + } catch (error) { + log( + 'error', + `Perplexity generateObject ('${objectName}') failed: ${error.message}` + ); + // Check if the error indicates lack of tool support + if ( + error.message.includes('tool use') || + error.message.includes('structured output') + ) { + log( + 'warn', + `Model ${modelId} might not support structured output via tools.` + ); + } + throw error; + } +} + +// TODO: Implement streamPerplexityObject if needed and supported. diff --git a/tasks/task_061.txt b/tasks/task_061.txt index aa39cf6d..c63845cc 100644 --- a/tasks/task_061.txt +++ b/tasks/task_061.txt @@ -402,7 +402,7 @@ describe('AI Client Factory', () => { ``` -## 4. Develop Centralized AI Services Module [pending] +## 4. Develop Centralized AI Services Module [done] ### Dependencies: 61.3 ### Description: Create a centralized AI services module that abstracts all AI interactions through a unified interface, using the Decorator pattern for adding functionality like logging and retries. ### Details: @@ -415,7 +415,39 @@ describe('AI Client Factory', () => { 7. Implement graceful fallback mechanisms when primary models fail 8. Testing approach: Create unit tests with mocked responses to verify service behavior -## 5. Implement Environment Variable Management [pending] + +Based on the exploration findings, here's additional information for the AI services module refactoring: + +The existing `ai-services.js` should be refactored to: + +1. Leverage the `ai-client-factory.js` for model instantiation while providing a higher-level service abstraction +2. Implement a layered architecture: + - Base service layer handling common functionality (retries, logging, caching) + - Model-specific service implementations extending the base + - Facade pattern to provide a unified API for all consumers + +3. Integration points: + - Replace direct OpenAI client usage with factory-provided clients + - Maintain backward compatibility with existing service consumers + - Add service registration mechanism for new AI providers + +4. Performance considerations: + - Implement request batching for high-volume operations + - Add request priority queuing for critical vs non-critical operations + - Implement circuit breaker pattern to prevent cascading failures + +5. Monitoring enhancements: + - Add detailed telemetry for response times, token usage, and costs + - Implement standardized error classification for better diagnostics + +6. Implementation sequence: + - Start with abstract base service class + - Refactor existing OpenAI implementations + - Add adapter layer for new providers + - Implement the unified facade + + +## 5. 
Implement Environment Variable Management [done] ### Dependencies: 61.1, 61.3 ### Description: Update environment variable handling to support multiple AI models and create documentation for configuration options. ### Details: @@ -455,7 +487,7 @@ describe('AI Client Factory', () => { 8. Testing approach: Create integration tests that verify model setting functionality with various inputs ## 8. Update Main Task Processing Logic [pending] -### Dependencies: 61.4, 61.5 +### Dependencies: 61.4, 61.5, 61.18 ### Description: Refactor the main task processing logic to use the new AI services module and support dynamic model selection. ### Details: 1. Update task processing functions to use the centralized AI services @@ -467,8 +499,63 @@ describe('AI Client Factory', () => { 7. Implement response validation to ensure quality across different models 8. Testing approach: Create integration tests that verify task processing with different model configurations + +When updating the main task processing logic, implement the following changes to align with the new configuration system: + +1. Replace direct environment variable access with calls to the configuration manager: + ```javascript + // Before + const apiKey = process.env.OPENAI_API_KEY; + const modelId = process.env.MAIN_MODEL || "gpt-4"; + + // After + import { getMainProvider, getMainModelId, getMainMaxTokens, getMainTemperature } from './config-manager.js'; + + const provider = getMainProvider(); + const modelId = getMainModelId(); + const maxTokens = getMainMaxTokens(); + const temperature = getMainTemperature(); + ``` + +2. Implement model fallback logic using the configuration hierarchy: + ```javascript + async function processTaskWithFallback(task) { + try { + return await processWithModel(task, getMainModelId()); + } catch (error) { + logger.warn(`Primary model failed: ${error.message}`); + const fallbackModel = getMainFallbackModelId(); + if (fallbackModel) { + return await processWithModel(task, fallbackModel); + } + throw error; + } + } + ``` + +3. Add configuration-aware telemetry points to track model usage and performance: + ```javascript + function trackModelPerformance(modelId, startTime, success) { + const duration = Date.now() - startTime; + telemetry.trackEvent('model_usage', { + modelId, + provider: getMainProvider(), + duration, + success, + configVersion: getConfigVersion() + }); + } + ``` + +4. Ensure all prompt templates are loaded through the configuration system rather than hardcoded: + ```javascript + const promptTemplate = getPromptTemplate('task_processing'); + const prompt = formatPrompt(promptTemplate, { task: taskData }); + ``` + + ## 9. Update Research Processing Logic [pending] -### Dependencies: 61.4, 61.5, 61.8 +### Dependencies: 61.4, 61.5, 61.8, 61.18 ### Description: Refactor the research processing logic to use the new AI services module and support dynamic model selection for research operations. ### Details: 1. Update research functions to use the centralized AI services @@ -480,6 +567,81 @@ describe('AI Client Factory', () => { 7. Create fallback mechanisms for research operations 8. Testing approach: Create integration tests that verify research functionality with different model configurations + +When implementing the refactored research processing logic, ensure the following: + +1. 
Replace direct environment variable access with the new configuration system: + ```javascript + // Old approach + const apiKey = process.env.OPENAI_API_KEY; + const model = "gpt-4"; + + // New approach + import { getResearchProvider, getResearchModelId, getResearchMaxTokens, + getResearchTemperature } from './config-manager.js'; + + const provider = getResearchProvider(); + const modelId = getResearchModelId(); + const maxTokens = getResearchMaxTokens(); + const temperature = getResearchTemperature(); + ``` + +2. Implement model fallback chains using the configuration system: + ```javascript + async function performResearch(query) { + try { + return await callAIService({ + provider: getResearchProvider(), + modelId: getResearchModelId(), + maxTokens: getResearchMaxTokens(), + temperature: getResearchTemperature() + }); + } catch (error) { + logger.warn(`Primary research model failed: ${error.message}`); + return await callAIService({ + provider: getResearchProvider('fallback'), + modelId: getResearchModelId('fallback'), + maxTokens: getResearchMaxTokens('fallback'), + temperature: getResearchTemperature('fallback') + }); + } + } + ``` + +3. Add support for dynamic parameter adjustment based on research type: + ```javascript + function getResearchParameters(researchType) { + // Get base parameters + const baseParams = { + provider: getResearchProvider(), + modelId: getResearchModelId(), + maxTokens: getResearchMaxTokens(), + temperature: getResearchTemperature() + }; + + // Adjust based on research type + switch(researchType) { + case 'deep': + return {...baseParams, maxTokens: baseParams.maxTokens * 1.5}; + case 'creative': + return {...baseParams, temperature: Math.min(baseParams.temperature + 0.2, 1.0)}; + case 'factual': + return {...baseParams, temperature: Math.max(baseParams.temperature - 0.2, 0)}; + default: + return baseParams; + } + } + ``` + +4. Ensure the caching mechanism uses configuration-based TTL settings: + ```javascript + const researchCache = new Cache({ + ttl: getResearchCacheTTL(), + maxSize: getResearchCacheMaxSize() + }); + ``` + + ## 10. Create Comprehensive Documentation and Examples [pending] ### Dependencies: 61.6, 61.7, 61.8, 61.9 ### Description: Develop comprehensive documentation for the new model management features, including examples, troubleshooting guides, and best practices. @@ -493,3 +655,851 @@ describe('AI Client Factory', () => { 7. Create comparison chart of model capabilities and limitations 8. 
Testing approach: Conduct user testing with the documentation to ensure clarity and completeness + +## Documentation Update for Configuration System Refactoring + +### Configuration System Architecture +- Document the separation between environment variables and configuration file: + - API keys: Sourced exclusively from environment variables (process.env or session.env) + - All other settings: Centralized in `.taskmasterconfig` JSON file + +### `.taskmasterconfig` Structure +```json +{ + "models": { + "completion": "gpt-3.5-turbo", + "chat": "gpt-4", + "embedding": "text-embedding-ada-002" + }, + "parameters": { + "temperature": 0.7, + "maxTokens": 2000, + "topP": 1 + }, + "logging": { + "enabled": true, + "level": "info" + }, + "defaults": { + "outputFormat": "markdown" + } +} +``` + +### Configuration Access Patterns +- Document the getter functions in `config-manager.js`: + - `getModelForRole(role)`: Returns configured model for a specific role + - `getParameter(name)`: Retrieves model parameters + - `getLoggingConfig()`: Access logging settings + - Example usage: `const completionModel = getModelForRole('completion')` + +### Environment Variable Resolution +- Explain the `resolveEnvVariable(key)` function: + - Checks both process.env and session.env + - Prioritizes session variables over process variables + - Returns null if variable not found + +### Configuration Precedence +- Document the order of precedence: + 1. Command-line arguments (highest priority) + 2. Session environment variables + 3. Process environment variables + 4. `.taskmasterconfig` settings + 5. Hardcoded defaults (lowest priority) + +### Migration Guide +- Steps for users to migrate from previous configuration approach +- How to verify configuration is correctly loaded + + +## 11. Refactor PRD Parsing to use generateObjectService [pending] +### Dependencies: 61.23 +### Description: Update PRD processing logic (callClaude, processClaudeResponse, handleStreamingRequest in ai-services.js) to use the new `generateObjectService` from `ai-services-unified.js` with an appropriate Zod schema. +### Details: + + + +The PRD parsing refactoring should align with the new configuration system architecture. When implementing this change: + +1. Replace direct environment variable access with `resolveEnvVariable` calls for API keys. + +2. Remove any hardcoded model names or parameters in the PRD processing functions. Instead, use the config-manager.js getters: + - `getModelForRole('prd')` to determine the appropriate model + - `getModelParameters('prd')` to retrieve temperature, maxTokens, etc. + +3. When constructing the generateObjectService call, ensure parameters are sourced from config: +```javascript +const modelConfig = getModelParameters('prd'); +const model = getModelForRole('prd'); + +const result = await generateObjectService({ + model, + temperature: modelConfig.temperature, + maxTokens: modelConfig.maxTokens, + // other parameters as needed + schema: prdSchema, + // existing prompt/context parameters +}); +``` + +4. Update any logging to respect the logging configuration from config-manager (e.g., `isLoggingEnabled('ai')`) + +5. Ensure any default values previously hardcoded are now retrieved from the configuration system. + + +## 12. Refactor Basic Subtask Generation to use generateObjectService [pending] +### Dependencies: 61.23 +### Description: Update the `generateSubtasks` function in `ai-services.js` to use the new `generateObjectService` from `ai-services-unified.js` with a Zod schema for the subtask array. 
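+
+A minimal sketch of the kind of Zod schema this subtask could introduce (the field names below are illustrative, not a final contract; the Details section that follows shows how such a schema would be passed to `generateObjectService`):
+
+```javascript
+import { z } from 'zod';
+
+// Hypothetical shape of a single generated subtask.
+const subtaskSchema = z.object({
+	id: z.number().int().positive(),
+	title: z.string(),
+	description: z.string(),
+	dependencies: z.array(z.number().int()).default([]),
+	details: z.string().optional()
+});
+
+// Array schema (e.g. the `subtasksArraySchema` referenced in the Details below).
+const subtasksArraySchema = z.array(subtaskSchema);
+```
+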
+### Details: + + + +The refactoring should leverage the new configuration system: + +1. Replace direct model references with calls to config-manager.js getters: + ```javascript + const { getModelForRole, getModelParams } = require('./config-manager'); + + // Instead of hardcoded models/parameters: + const model = getModelForRole('subtask-generator'); + const modelParams = getModelParams('subtask-generator'); + ``` + +2. Update API key handling to use the resolveEnvVariable pattern: + ```javascript + const { resolveEnvVariable } = require('./utils'); + const apiKey = resolveEnvVariable('OPENAI_API_KEY'); + ``` + +3. When calling generateObjectService, pass the configuration parameters: + ```javascript + const result = await generateObjectService({ + schema: subtasksArraySchema, + prompt: subtaskPrompt, + model: model, + temperature: modelParams.temperature, + maxTokens: modelParams.maxTokens, + // Other parameters from config + }); + ``` + +4. Add error handling that respects logging configuration: + ```javascript + const { isLoggingEnabled } = require('./config-manager'); + + try { + // Generation code + } catch (error) { + if (isLoggingEnabled('errors')) { + console.error('Subtask generation error:', error); + } + throw error; + } + ``` + + +## 13. Refactor Research Subtask Generation to use generateObjectService [pending] +### Dependencies: 61.23 +### Description: Update the `generateSubtasksWithPerplexity` function in `ai-services.js` to first perform research (potentially keeping the Perplexity call separate or adapting it) and then use `generateObjectService` from `ai-services-unified.js` with research results included in the prompt. +### Details: + + + +The refactoring should align with the new configuration system by: + +1. Replace direct environment variable access with `resolveEnvVariable` for API keys +2. Use the config-manager.js getters to retrieve model parameters: + - Replace hardcoded model names with `getModelForRole('research')` + - Use `getParametersForRole('research')` to get temperature, maxTokens, etc. +3. Implement proper error handling that respects the `getLoggingConfig()` settings +4. Example implementation pattern: +```javascript +const { getModelForRole, getParametersForRole, getLoggingConfig } = require('./config-manager'); +const { resolveEnvVariable } = require('./environment-utils'); + +// In the refactored function: +const researchModel = getModelForRole('research'); +const { temperature, maxTokens } = getParametersForRole('research'); +const apiKey = resolveEnvVariable('PERPLEXITY_API_KEY'); +const { verbose } = getLoggingConfig(); + +// Then use these variables in the API call configuration +``` +5. Ensure the transition to generateObjectService maintains all existing functionality while leveraging the new configuration system + + +## 14. Refactor Research Task Description Generation to use generateObjectService [pending] +### Dependencies: 61.23 +### Description: Update the `generateTaskDescriptionWithPerplexity` function in `ai-services.js` to first perform research and then use `generateObjectService` from `ai-services-unified.js` to generate the structured task description. +### Details: + + + +The refactoring should incorporate the new configuration management system: + +1. Update imports to include the config-manager: +```javascript +const { getModelForRole, getParametersForRole } = require('./config-manager'); +``` + +2. 
Replace any hardcoded model selections or parameters with config-manager calls: +```javascript +// Replace direct model references like: +// const model = "perplexity-model-7b-online" +// With: +const model = getModelForRole('research'); +const parameters = getParametersForRole('research'); +``` + +3. For API key handling, use the resolveEnvVariable pattern: +```javascript +const apiKey = resolveEnvVariable('PERPLEXITY_API_KEY'); +``` + +4. When calling generateObjectService, pass the configuration-derived parameters: +```javascript +return generateObjectService({ + prompt: researchResults, + schema: taskDescriptionSchema, + role: 'taskDescription', + // Config-driven parameters will be applied within generateObjectService +}); +``` + +5. Remove any hardcoded configuration values, ensuring all settings are retrieved from the centralized configuration system. + + +## 15. Refactor Complexity Analysis AI Call to use generateObjectService [pending] +### Dependencies: 61.23 +### Description: Update the logic that calls the AI after using `generateComplexityAnalysisPrompt` in `ai-services.js` to use the new `generateObjectService` from `ai-services-unified.js` with a Zod schema for the complexity report. +### Details: + + + +The complexity analysis AI call should be updated to align with the new configuration system architecture. When refactoring to use `generateObjectService`, implement the following changes: + +1. Replace direct model references with calls to the appropriate config getter: + ```javascript + const modelName = getComplexityAnalysisModel(); // Use the specific getter from config-manager.js + ``` + +2. Retrieve AI parameters from the config system: + ```javascript + const temperature = getAITemperature('complexityAnalysis'); + const maxTokens = getAIMaxTokens('complexityAnalysis'); + ``` + +3. When constructing the call to `generateObjectService`, pass these configuration values: + ```javascript + const result = await generateObjectService({ + prompt, + schema: complexityReportSchema, + modelName, + temperature, + maxTokens, + sessionEnv: session?.env + }); + ``` + +4. Ensure API key resolution uses the `resolveEnvVariable` helper: + ```javascript + // Don't hardcode API keys or directly access process.env + // The generateObjectService should handle this internally with resolveEnvVariable + ``` + +5. Add logging configuration based on settings: + ```javascript + const enableLogging = getAILoggingEnabled('complexityAnalysis'); + if (enableLogging) { + // Use the logging mechanism defined in the configuration + } + ``` + + +## 16. Refactor Task Addition AI Call to use generateObjectService [pending] +### Dependencies: 61.23 +### Description: Update the logic that calls the AI after using `_buildAddTaskPrompt` in `ai-services.js` to use the new `generateObjectService` from `ai-services-unified.js` with a Zod schema for the single task object. +### Details: + + + +To implement this refactoring, you'll need to: + +1. 
Replace direct AI calls with the new `generateObjectService` approach: + ```javascript + // OLD approach + const aiResponse = await callLLM(prompt, modelName, temperature, maxTokens); + const task = parseAIResponseToTask(aiResponse); + + // NEW approach using generateObjectService with config-manager + import { generateObjectService } from '../services/ai-services-unified.js'; + import { getAIModelForRole, getAITemperature, getAIMaxTokens } from '../config/config-manager.js'; + import { taskSchema } from '../schemas/task-schema.js'; // Create this Zod schema for a single task + + const modelName = getAIModelForRole('taskCreation'); + const temperature = getAITemperature('taskCreation'); + const maxTokens = getAIMaxTokens('taskCreation'); + + const task = await generateObjectService({ + prompt: _buildAddTaskPrompt(...), + schema: taskSchema, + modelName, + temperature, + maxTokens + }); + ``` + +2. Create a Zod schema for the task object in a new file `schemas/task-schema.js` that defines the expected structure. + +3. Ensure API key resolution uses the new pattern: + ```javascript + // This happens inside generateObjectService, but verify it uses: + import { resolveEnvVariable } from '../config/config-manager.js'; + // Instead of direct process.env access + ``` + +4. Update any error handling to match the new service's error patterns. + + +## 17. Refactor General Chat/Update AI Calls [pending] +### Dependencies: 61.23 +### Description: Refactor functions like `sendChatWithContext` (and potentially related task update functions in `task-manager.js` if they make direct AI calls) to use `streamTextService` or `generateTextService` from `ai-services-unified.js`. +### Details: + + + +When refactoring `sendChatWithContext` and related functions, ensure they align with the new configuration system: + +1. Replace direct model references with config getter calls: + ```javascript + // Before + const model = "gpt-4"; + + // After + import { getModelForRole } from './config-manager.js'; + const model = getModelForRole('chat'); // or appropriate role + ``` + +2. Extract AI parameters from config rather than hardcoding: + ```javascript + import { getAIParameters } from './config-manager.js'; + const { temperature, maxTokens } = getAIParameters('chat'); + ``` + +3. When calling `streamTextService` or `generateTextService`, pass parameters from config: + ```javascript + await streamTextService({ + messages, + model: getModelForRole('chat'), + temperature: getAIParameters('chat').temperature, + // other parameters as needed + }); + ``` + +4. For logging control, check config settings: + ```javascript + import { isLoggingEnabled } from './config-manager.js'; + + if (isLoggingEnabled('aiCalls')) { + console.log('AI request:', messages); + } + ``` + +5. Ensure any default behaviors respect configuration defaults rather than hardcoded values. + + +## 18. Refactor Callers of AI Parsing Utilities [pending] +### Dependencies: 61.11,61.12,61.13,61.14,61.15,61.16,61.17,61.19 +### Description: Update the code that calls `parseSubtasksFromText`, `parseTaskJsonResponse`, and `parseTasksFromCompletion` to instead directly handle the structured JSON output provided by `generateObjectService` (as the refactored AI calls will now use it). +### Details: + + + +The refactoring of callers to AI parsing utilities should align with the new configuration system. When updating these callers: + +1. Replace direct API key references with calls to the configuration system using `resolveEnvVariable` for sensitive credentials. + +2. 
Update model selection logic to use the centralized configuration from `.taskmasterconfig` via the getter functions in `config-manager.js`. For example: + ```javascript + // Old approach + const model = "gpt-4"; + + // New approach + import { getModelForRole } from './config-manager'; + const model = getModelForRole('parsing'); // or appropriate role + ``` + +3. Similarly, replace hardcoded parameters with configuration-based values: + ```javascript + // Old approach + const maxTokens = 2000; + const temperature = 0.2; + + // New approach + import { getAIParameterValue } from './config-manager'; + const maxTokens = getAIParameterValue('maxTokens', 'parsing'); + const temperature = getAIParameterValue('temperature', 'parsing'); + ``` + +4. Ensure logging behavior respects the centralized logging configuration settings. + +5. When calling `generateObjectService`, pass the appropriate configuration context to ensure it uses the correct settings from the centralized configuration system. + + +## 19. Refactor `updateSubtaskById` AI Call [pending] +### Dependencies: 61.23 +### Description: Refactor the AI call within `updateSubtaskById` in `task-manager.js` (which generates additional information based on a prompt) to use the appropriate unified service function (e.g., `generateTextService`) from `ai-services-unified.js`. +### Details: + + + +The `updateSubtaskById` function currently makes direct AI calls with hardcoded parameters. When refactoring to use the unified service: + +1. Replace direct OpenAI calls with `generateTextService` from `ai-services-unified.js` +2. Use configuration parameters from `config-manager.js`: + - Replace hardcoded model with `getMainModel()` + - Use `getMainMaxTokens()` for token limits + - Apply `getMainTemperature()` for response randomness +3. Ensure prompt construction remains consistent but passes these dynamic parameters +4. Handle API key resolution through the unified service (which uses `resolveEnvVariable`) +5. Update error handling to work with the unified service response format +6. If the function uses any logging, ensure it respects `getLoggingEnabled()` setting + +Example refactoring pattern: +```javascript +// Before +const completion = await openai.chat.completions.create({ + model: "gpt-4", + temperature: 0.7, + max_tokens: 1000, + messages: [/* prompt messages */] +}); + +// After +const completion = await generateTextService({ + model: getMainModel(), + temperature: getMainTemperature(), + max_tokens: getMainMaxTokens(), + messages: [/* prompt messages */] +}); +``` + + +## 20. Implement `anthropic.js` Provider Module using Vercel AI SDK [done] +### Dependencies: None +### Description: Create and implement the `anthropic.js` module within `src/ai-providers/`. This module should contain functions to interact with the Anthropic API (streaming and non-streaming) using the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`. +### Details: + + +## 21. Implement `perplexity.js` Provider Module using Vercel AI SDK [done] +### Dependencies: None +### Description: Create and implement the `perplexity.js` module within `src/ai-providers/`. This module should contain functions to interact with the Perplexity API (likely using their OpenAI-compatible endpoint) via the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`. +### Details: + + +## 22. 
Implement `openai.js` Provider Module using Vercel AI SDK [pending] +### Dependencies: None +### Description: Create and implement the `openai.js` module within `src/ai-providers/`. This module should contain functions to interact with the OpenAI API (streaming and non-streaming) using the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`. (Optional, implement if OpenAI models are needed). +### Details: + + +## 23. Implement Conditional Provider Logic in `ai-services-unified.js` [pending] +### Dependencies: 61.20,61.21,61.22,61.24,61.25,61.26,61.27,61.28,61.29,61.30,61.34 +### Description: Implement logic within the functions of `ai-services-unified.js` (e.g., `generateTextService`, `generateObjectService`, `streamChatService`) to dynamically select and call the appropriate provider module (`anthropic.js`, `perplexity.js`, etc.) based on configuration (e.g., environment variables like `AI_PROVIDER` and `AI_MODEL` from `process.env` or `session.env`). +### Details: + + + +The unified service should now use the configuration manager for provider selection rather than directly accessing environment variables. Here's the implementation approach: + +1. Import the config-manager functions: +```javascript +const { + getMainProvider, + getResearchProvider, + getFallbackProvider, + getModelForRole, + getProviderParameters +} = require('./config-manager'); +``` + +2. Implement provider selection based on context/role: +```javascript +function selectProvider(role = 'default', context = {}) { + // Try to get provider based on role or context + let provider; + + if (role === 'research') { + provider = getResearchProvider(); + } else if (context.fallback) { + provider = getFallbackProvider(); + } else { + provider = getMainProvider(); + } + + // Dynamically import the provider module + return require(`./${provider}.js`); +} +``` + +3. Update service functions to use this selection logic: +```javascript +async function generateTextService(prompt, options = {}) { + const { role = 'default', ...otherOptions } = options; + const provider = selectProvider(role, options); + const model = getModelForRole(role); + const parameters = getProviderParameters(provider.name); + + return provider.generateText(prompt, { + model, + ...parameters, + ...otherOptions + }); +} +``` + +4. Implement fallback logic for service resilience: +```javascript +async function executeWithFallback(serviceFunction, ...args) { + try { + return await serviceFunction(...args); + } catch (error) { + console.error(`Primary provider failed: ${error.message}`); + const fallbackProvider = require(`./${getFallbackProvider()}.js`); + return fallbackProvider[serviceFunction.name](...args); + } +} +``` + +5. Add provider capability checking to prevent calling unsupported features: +```javascript +function checkProviderCapability(provider, capability) { + const capabilities = { + 'anthropic': ['text', 'chat', 'stream'], + 'perplexity': ['text', 'chat', 'stream', 'research'], + 'openai': ['text', 'chat', 'stream', 'embedding', 'vision'] + // Add other providers as needed + }; + + return capabilities[provider]?.includes(capability) || false; +} +``` + + +## 24. Implement `google.js` Provider Module using Vercel AI SDK [pending] +### Dependencies: None +### Description: Create and implement the `google.js` module within `src/ai-providers/`. 
This module should contain functions to interact with Google AI models (e.g., Gemini) using the **Vercel AI SDK (`@ai-sdk/google`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`. +### Details: + + +## 25. Implement `ollama.js` Provider Module [pending] +### Dependencies: None +### Description: Create and implement the `ollama.js` module within `src/ai-providers/`. This module should contain functions to interact with local Ollama models using the **`ollama-ai-provider` library**, adhering to the standardized input/output format defined for `ai-services-unified.js`. Note the specific library used. +### Details: + + +## 26. Implement `mistral.js` Provider Module using Vercel AI SDK [pending] +### Dependencies: None +### Description: Create and implement the `mistral.js` module within `src/ai-providers/`. This module should contain functions to interact with Mistral AI models using the **Vercel AI SDK (`@ai-sdk/mistral`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`. +### Details: + + +## 27. Implement `azure.js` Provider Module using Vercel AI SDK [pending] +### Dependencies: None +### Description: Create and implement the `azure.js` module within `src/ai-providers/`. This module should contain functions to interact with Azure OpenAI models using the **Vercel AI SDK (`@ai-sdk/azure`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`. +### Details: + + +## 28. Implement `openrouter.js` Provider Module [pending] +### Dependencies: None +### Description: Create and implement the `openrouter.js` module within `src/ai-providers/`. This module should contain functions to interact with various models via OpenRouter using the **`@openrouter/ai-sdk-provider` library**, adhering to the standardized input/output format defined for `ai-services-unified.js`. Note the specific library used. +### Details: + + +## 29. Implement `xai.js` Provider Module using Vercel AI SDK [pending] +### Dependencies: None +### Description: Create and implement the `xai.js` module within `src/ai-providers/`. This module should contain functions to interact with xAI models (e.g., Grok) using the **Vercel AI SDK (`@ai-sdk/xai`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`. +### Details: + + +## 30. Update Configuration Management for AI Providers [pending] +### Dependencies: None +### Description: Update `config-manager.js` and related configuration logic/documentation to support the new provider/model selection mechanism for `ai-services-unified.js` (e.g., using `AI_PROVIDER`, `AI_MODEL` env vars from `process.env` or `session.env`), ensuring compatibility with existing role-based selection if needed. +### Details: + + + +```javascript +// Implementation details for config-manager.js updates + +/** + * Unified configuration resolution function that checks multiple sources in priority order: + * 1. process.env + * 2. session.env (if available) + * 3. Default values from .taskmasterconfig + * + * @param {string} key - Configuration key to resolve + * @param {object} session - Optional session object that may contain env values + * @param {*} defaultValue - Default value if not found in any source + * @returns {*} Resolved configuration value + */ +function resolveConfig(key, session = null, defaultValue = null) { + return process.env[key] ?? session?.env?.[key] ?? 
defaultValue; +} + +// AI provider/model resolution with fallback to role-based selection +function resolveAIConfig(session = null, role = 'default') { + const provider = resolveConfig('AI_PROVIDER', session); + const model = resolveConfig('AI_MODEL', session); + + // If explicit provider/model specified, use those + if (provider && model) { + return { provider, model }; + } + + // Otherwise fall back to role-based configuration + const roleConfig = getRoleBasedAIConfig(role); + return { + provider: provider || roleConfig.provider, + model: model || roleConfig.model + }; +} + +// Example usage in ai-services-unified.js: +// const { provider, model } = resolveAIConfig(session, role); +// const client = getProviderClient(provider, resolveConfig(`${provider.toUpperCase()}_API_KEY`, session)); + +/** + * Configuration Resolution Documentation: + * + * 1. Environment Variables: + * - AI_PROVIDER: Explicitly sets the AI provider (e.g., 'openai', 'anthropic') + * - AI_MODEL: Explicitly sets the model to use (e.g., 'gpt-4', 'claude-2') + * - OPENAI_API_KEY, ANTHROPIC_API_KEY, etc.: Provider-specific API keys + * + * 2. Resolution Strategy: + * - Values are first checked in process.env + * - If not found, session.env is checked (when available) + * - If still not found, defaults from .taskmasterconfig are used + * - For AI provider/model, explicit settings override role-based configuration + * + * 3. Backward Compatibility: + * - Role-based selection continues to work when AI_PROVIDER/AI_MODEL are not set + * - Existing code using getRoleBasedAIConfig() will continue to function + */ +``` + + + + +```javascript +/** + * Refactored configuration management implementation + */ + +// Core configuration getters - replace direct CONFIG access +const getMainProvider = () => resolveConfig('AI_PROVIDER', null, CONFIG.ai?.mainProvider || 'openai'); +const getMainModel = () => resolveConfig('AI_MODEL', null, CONFIG.ai?.mainModel || 'gpt-4'); +const getLogLevel = () => resolveConfig('LOG_LEVEL', null, CONFIG.logging?.level || 'info'); +const getMaxTokens = (role = 'default') => { + const explicitMaxTokens = parseInt(resolveConfig('MAX_TOKENS', null, 0), 10); + if (explicitMaxTokens > 0) return explicitMaxTokens; + + // Fall back to role-based configuration + return CONFIG.ai?.roles?.[role]?.maxTokens || CONFIG.ai?.defaultMaxTokens || 4096; +}; + +// API key resolution - separate from general configuration +function resolveEnvVariable(key, session = null) { + return process.env[key] ?? session?.env?.[key] ?? null; +} + +function isApiKeySet(provider, session = null) { + const keyName = `${provider.toUpperCase()}_API_KEY`; + return Boolean(resolveEnvVariable(keyName, session)); +} + +/** + * Migration guide for application components: + * + * 1. Replace direct CONFIG access: + * - Before: `const provider = CONFIG.ai.mainProvider;` + * - After: `const provider = getMainProvider();` + * + * 2. Replace direct process.env access for API keys: + * - Before: `const apiKey = process.env.OPENAI_API_KEY;` + * - After: `const apiKey = resolveEnvVariable('OPENAI_API_KEY', session);` + * + * 3. Check API key availability: + * - Before: `if (process.env.OPENAI_API_KEY) {...}` + * - After: `if (isApiKeySet('openai', session)) {...}` + * + * 4. Update provider/model selection in ai-services: + * - Before: + * ``` + * const provider = role ? CONFIG.ai.roles[role]?.provider : CONFIG.ai.mainProvider; + * const model = role ? 
CONFIG.ai.roles[role]?.model : CONFIG.ai.mainModel; + * ``` + * - After: + * ``` + * const { provider, model } = resolveAIConfig(session, role); + * ``` + */ + +// Update .taskmasterconfig schema documentation +const configSchema = { + "ai": { + "mainProvider": "Default AI provider (overridden by AI_PROVIDER env var)", + "mainModel": "Default AI model (overridden by AI_MODEL env var)", + "defaultMaxTokens": "Default max tokens (overridden by MAX_TOKENS env var)", + "roles": { + "role_name": { + "provider": "Provider for this role (fallback if AI_PROVIDER not set)", + "model": "Model for this role (fallback if AI_MODEL not set)", + "maxTokens": "Max tokens for this role (fallback if MAX_TOKENS not set)" + } + } + }, + "logging": { + "level": "Logging level (overridden by LOG_LEVEL env var)" + } +}; +``` + +Implementation notes: +1. All configuration getters should provide environment variable override capability first, then fall back to .taskmasterconfig values +2. API key resolution should be kept separate from general configuration to maintain security boundaries +3. Update all application components to use these new getters rather than accessing CONFIG or process.env directly +4. Document the priority order (env vars > session.env > .taskmasterconfig) in JSDoc comments +5. Ensure backward compatibility by maintaining support for role-based configuration when explicit env vars aren't set + + + +## 31. Implement Integration Tests for Unified AI Service [pending] +### Dependencies: 61.18 +### Description: Implement integration tests for `ai-services-unified.js`. These tests should verify the correct routing to different provider modules based on configuration and ensure the unified service functions (`generateTextService`, `generateObjectService`, etc.) work correctly when called from modules like `task-manager.js`. +### Details: + + + +For the integration tests of the Unified AI Service, consider the following implementation details: + +1. Setup test fixtures: + - Create a mock `.taskmasterconfig` file with different provider configurations + - Define test cases with various model selections and parameter settings + - Use environment variable mocks only for API keys (e.g., `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`) + +2. Test configuration resolution: + - Verify that `ai-services-unified.js` correctly retrieves settings from `config-manager.js` + - Test that model selection follows the hierarchy defined in `.taskmasterconfig` + - Ensure fallback mechanisms work when primary providers are unavailable + +3. Mock the provider modules: + ```javascript + jest.mock('../services/openai-service.js'); + jest.mock('../services/anthropic-service.js'); + ``` + +4. Test specific scenarios: + - Provider selection based on configured preferences + - Parameter inheritance from config (temperature, maxTokens) + - Error handling when API keys are missing + - Proper routing when specific models are requested + +5. Verify integration with task-manager: + ```javascript + test('task-manager correctly uses unified AI service with config-based settings', async () => { + // Setup mock config with specific settings + mockConfigManager.getAIProviderPreference.mockReturnValue(['openai', 'anthropic']); + mockConfigManager.getModelForRole.mockReturnValue('gpt-4'); + mockConfigManager.getParametersForModel.mockReturnValue({ temperature: 0.7, maxTokens: 2000 }); + + // Verify task-manager uses these settings when calling the unified service + // ... + }); + ``` + +6. 
Include tests for configuration changes at runtime and their effect on service behavior. + + +## 32. Update Documentation for New AI Architecture [pending] +### Dependencies: 61.31 +### Description: Update relevant documentation files (e.g., `architecture.mdc`, `taskmaster.mdc`, environment variable guides, README) to accurately reflect the new AI service architecture using `ai-services-unified.js`, provider modules, the Vercel AI SDK, and the updated configuration approach. +### Details: + + + +The new AI architecture introduces a clear separation between sensitive credentials and configuration settings: + +## Environment Variables vs Configuration File + +- **Environment Variables (.env)**: + - Store only sensitive API keys and credentials + - Accessed via `resolveEnvVariable()` which checks both process.env and session.env + - Example: `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `GOOGLE_API_KEY` + - No model names, parameters, or non-sensitive settings should be here + +- **.taskmasterconfig File**: + - Central location for all non-sensitive configuration + - Structured JSON with clear sections for different aspects of the system + - Contains: + - Model mappings by role (e.g., `systemModels`, `userModels`) + - Default parameters (temperature, maxTokens, etc.) + - Logging preferences + - Provider-specific settings + - Accessed via getter functions from `config-manager.js` like: + ```javascript + import { getModelForRole, getDefaultTemperature } from './config-manager.js'; + + // Usage examples + const model = getModelForRole('system'); + const temp = getDefaultTemperature(); + ``` + +## Implementation Notes +- Document the structure of `.taskmasterconfig` with examples +- Explain the migration path for users with existing setups +- Include a troubleshooting section for common configuration issues +- Add a configuration validation section explaining how the system verifies settings + + +## 33. Cleanup Old AI Service Files [pending] +### Dependencies: 61.32 +### Description: After all other migration subtasks (refactoring, provider implementation, testing, documentation) are complete and verified, remove the old `ai-services.js` and `ai-client-factory.js` files from the `scripts/modules/` directory. Ensure no code still references them. +### Details: + + +## 34. Audit and Standardize Env Variable Access [pending] +### Dependencies: None +### Description: Audit the entire codebase (core modules, provider modules, utilities) to ensure all accesses to environment variables (API keys, configuration flags) consistently use a standardized resolution function (like `resolveEnvVariable` or a new utility) that checks `process.env` first and then `session.env` if available. Refactor any direct `process.env` access where `session.env` should also be considered. +### Details: + + + +This audit should distinguish between two types of configuration: + +1. **Sensitive credentials (API keys)**: These should exclusively use the `resolveEnvVariable` pattern to check both `process.env` and `session.env`. Verify that no API keys are hardcoded or accessed through direct `process.env` references. + +2. **Application configuration**: All non-credential settings should be migrated to use the centralized `.taskmasterconfig` system via the `config-manager.js` getters. This includes: + - Model selections and role assignments + - Parameter settings (temperature, maxTokens, etc.) 
+ - Logging configuration + - Default behaviors and fallbacks + +Implementation notes: +- Create a comprehensive inventory of all environment variable accesses +- Categorize each as either credential or application configuration +- For credentials: standardize on `resolveEnvVariable` pattern +- For app config: migrate to appropriate `config-manager.js` getter methods +- Document any exceptions that require special handling +- Add validation to prevent regression (e.g., ESLint rules against direct `process.env` access) + +This separation ensures security best practices for credentials while centralizing application configuration for better maintainability. + + +## 35. Review/Refactor MCP Direct Functions for Explicit Config Root Passing [pending] +### Dependencies: None +### Description: Review all functions in mcp-server/src/core/direct-functions/*.js. Ensure that any calls made from these functions to getters in scripts/modules/config-manager.js (e.g., getMainProvider, getDefaultPriority, getLogLevel, etc.) explicitly pass the projectRoot (obtained from the args object, which is derived from the session context) as the first argument to the getter. This prevents the getters from incorrectly falling back to using findProjectRoot() based on the server's cwd when running in an MCP context. This is crucial for loading the correct .taskmasterconfig settings based on the user's project. +### Details: + + diff --git a/tasks/tasks.json b/tasks/tasks.json index 4c48679a..07008b51 100644 --- a/tasks/tasks.json +++ b/tasks/tasks.json @@ -2785,8 +2785,8 @@ "dependencies": [ 3 ], - "details": "1. Create `ai-services.js` module to consolidate all AI model interactions\n2. Implement wrapper functions for text generation and streaming\n3. Add retry mechanisms for handling API rate limits and transient errors\n4. Implement logging for all AI interactions for observability\n5. Create model-specific adapters to normalize responses across different providers\n6. Add caching layer for frequently used responses to optimize performance\n7. Implement graceful fallback mechanisms when primary models fail\n8. Testing approach: Create unit tests with mocked responses to verify service behavior", - "status": "pending", + "details": "1. Create `ai-services.js` module to consolidate all AI model interactions\n2. Implement wrapper functions for text generation and streaming\n3. Add retry mechanisms for handling API rate limits and transient errors\n4. Implement logging for all AI interactions for observability\n5. Create model-specific adapters to normalize responses across different providers\n6. Add caching layer for frequently used responses to optimize performance\n7. Implement graceful fallback mechanisms when primary models fail\n8. Testing approach: Create unit tests with mocked responses to verify service behavior\n\n\nBased on the exploration findings, here's additional information for the AI services module refactoring:\n\nThe existing `ai-services.js` should be refactored to:\n\n1. Leverage the `ai-client-factory.js` for model instantiation while providing a higher-level service abstraction\n2. Implement a layered architecture:\n - Base service layer handling common functionality (retries, logging, caching)\n - Model-specific service implementations extending the base\n - Facade pattern to provide a unified API for all consumers\n\n3. 
Integration points:\n - Replace direct OpenAI client usage with factory-provided clients\n - Maintain backward compatibility with existing service consumers\n - Add service registration mechanism for new AI providers\n\n4. Performance considerations:\n - Implement request batching for high-volume operations\n - Add request priority queuing for critical vs non-critical operations\n - Implement circuit breaker pattern to prevent cascading failures\n\n5. Monitoring enhancements:\n - Add detailed telemetry for response times, token usage, and costs\n - Implement standardized error classification for better diagnostics\n\n6. Implementation sequence:\n - Start with abstract base service class\n - Refactor existing OpenAI implementations\n - Add adapter layer for new providers\n - Implement the unified facade\n", + "status": "done", "parentTaskId": 61 }, { @@ -2798,7 +2798,7 @@ 3 ], "details": "1. Update `.env.example` with all required API keys for supported models\n2. Implement environment variable validation on startup\n3. Create clear error messages for missing or invalid environment variables\n4. Add support for model-specific configuration options\n5. Document all environment variables and their purposes\n6. Implement a check to ensure required API keys are present for selected models\n7. Add support for optional configuration parameters for each model\n8. Testing approach: Create tests that verify environment variable validation logic", - "status": "pending", + "status": "done", "parentTaskId": 61 }, { @@ -2834,9 +2834,10 @@ "description": "Refactor the main task processing logic to use the new AI services module and support dynamic model selection.", "dependencies": [ 4, - 5 + 5, + "61.18" ], - "details": "1. Update task processing functions to use the centralized AI services\n2. Implement dynamic model selection based on configuration\n3. Add error handling for model-specific failures\n4. Implement graceful degradation when preferred models are unavailable\n5. Update prompts to be model-agnostic where possible\n6. Add telemetry for model performance monitoring\n7. Implement response validation to ensure quality across different models\n8. Testing approach: Create integration tests that verify task processing with different model configurations", + "details": "1. Update task processing functions to use the centralized AI services\n2. Implement dynamic model selection based on configuration\n3. Add error handling for model-specific failures\n4. Implement graceful degradation when preferred models are unavailable\n5. Update prompts to be model-agnostic where possible\n6. Add telemetry for model performance monitoring\n7. Implement response validation to ensure quality across different models\n8. Testing approach: Create integration tests that verify task processing with different model configurations\n\n\nWhen updating the main task processing logic, implement the following changes to align with the new configuration system:\n\n1. Replace direct environment variable access with calls to the configuration manager:\n ```javascript\n // Before\n const apiKey = process.env.OPENAI_API_KEY;\n const modelId = process.env.MAIN_MODEL || \"gpt-4\";\n \n // After\n import { getMainProvider, getMainModelId, getMainMaxTokens, getMainTemperature } from './config-manager.js';\n \n const provider = getMainProvider();\n const modelId = getMainModelId();\n const maxTokens = getMainMaxTokens();\n const temperature = getMainTemperature();\n ```\n\n2. 
Implement model fallback logic using the configuration hierarchy:\n ```javascript\n async function processTaskWithFallback(task) {\n try {\n return await processWithModel(task, getMainModelId());\n } catch (error) {\n logger.warn(`Primary model failed: ${error.message}`);\n const fallbackModel = getMainFallbackModelId();\n if (fallbackModel) {\n return await processWithModel(task, fallbackModel);\n }\n throw error;\n }\n }\n ```\n\n3. Add configuration-aware telemetry points to track model usage and performance:\n ```javascript\n function trackModelPerformance(modelId, startTime, success) {\n const duration = Date.now() - startTime;\n telemetry.trackEvent('model_usage', {\n modelId,\n provider: getMainProvider(),\n duration,\n success,\n configVersion: getConfigVersion()\n });\n }\n ```\n\n4. Ensure all prompt templates are loaded through the configuration system rather than hardcoded:\n ```javascript\n const promptTemplate = getPromptTemplate('task_processing');\n const prompt = formatPrompt(promptTemplate, { task: taskData });\n ```\n", "status": "pending", "parentTaskId": 61 }, @@ -2847,9 +2848,10 @@ "dependencies": [ 4, 5, - 8 + 8, + "61.18" ], - "details": "1. Update research functions to use the centralized AI services\n2. Implement dynamic model selection for research operations\n3. Add specialized error handling for research-specific issues\n4. Optimize prompts for research-focused models\n5. Implement result caching for research operations\n6. Add support for model-specific research parameters\n7. Create fallback mechanisms for research operations\n8. Testing approach: Create integration tests that verify research functionality with different model configurations", + "details": "1. Update research functions to use the centralized AI services\n2. Implement dynamic model selection for research operations\n3. Add specialized error handling for research-specific issues\n4. Optimize prompts for research-focused models\n5. Implement result caching for research operations\n6. Add support for model-specific research parameters\n7. Create fallback mechanisms for research operations\n8. Testing approach: Create integration tests that verify research functionality with different model configurations\n\n\nWhen implementing the refactored research processing logic, ensure the following:\n\n1. Replace direct environment variable access with the new configuration system:\n ```javascript\n // Old approach\n const apiKey = process.env.OPENAI_API_KEY;\n const model = \"gpt-4\";\n \n // New approach\n import { getResearchProvider, getResearchModelId, getResearchMaxTokens, \n getResearchTemperature } from './config-manager.js';\n \n const provider = getResearchProvider();\n const modelId = getResearchModelId();\n const maxTokens = getResearchMaxTokens();\n const temperature = getResearchTemperature();\n ```\n\n2. Implement model fallback chains using the configuration system:\n ```javascript\n async function performResearch(query) {\n try {\n return await callAIService({\n provider: getResearchProvider(),\n modelId: getResearchModelId(),\n maxTokens: getResearchMaxTokens(),\n temperature: getResearchTemperature()\n });\n } catch (error) {\n logger.warn(`Primary research model failed: ${error.message}`);\n return await callAIService({\n provider: getResearchProvider('fallback'),\n modelId: getResearchModelId('fallback'),\n maxTokens: getResearchMaxTokens('fallback'),\n temperature: getResearchTemperature('fallback')\n });\n }\n }\n ```\n\n3. 
Add support for dynamic parameter adjustment based on research type:\n ```javascript\n function getResearchParameters(researchType) {\n // Get base parameters\n const baseParams = {\n provider: getResearchProvider(),\n modelId: getResearchModelId(),\n maxTokens: getResearchMaxTokens(),\n temperature: getResearchTemperature()\n };\n \n // Adjust based on research type\n switch(researchType) {\n case 'deep':\n return {...baseParams, maxTokens: baseParams.maxTokens * 1.5};\n case 'creative':\n return {...baseParams, temperature: Math.min(baseParams.temperature + 0.2, 1.0)};\n case 'factual':\n return {...baseParams, temperature: Math.max(baseParams.temperature - 0.2, 0)};\n default:\n return baseParams;\n }\n }\n ```\n\n4. Ensure the caching mechanism uses configuration-based TTL settings:\n ```javascript\n const researchCache = new Cache({\n ttl: getResearchCacheTTL(),\n maxSize: getResearchCacheMaxSize()\n });\n ```\n", "status": "pending", "parentTaskId": 61 }, @@ -2863,9 +2865,260 @@ 8, 9 ], - "details": "1. Update README.md with new model management commands\n2. Create usage examples for all supported models\n3. Document environment variable requirements for each model\n4. Create troubleshooting guide for common issues\n5. Add performance considerations and best practices\n6. Document API key acquisition process for each supported service\n7. Create comparison chart of model capabilities and limitations\n8. Testing approach: Conduct user testing with the documentation to ensure clarity and completeness", + "details": "1. Update README.md with new model management commands\n2. Create usage examples for all supported models\n3. Document environment variable requirements for each model\n4. Create troubleshooting guide for common issues\n5. Add performance considerations and best practices\n6. Document API key acquisition process for each supported service\n7. Create comparison chart of model capabilities and limitations\n8. Testing approach: Conduct user testing with the documentation to ensure clarity and completeness\n\n\n## Documentation Update for Configuration System Refactoring\n\n### Configuration System Architecture\n- Document the separation between environment variables and configuration file:\n - API keys: Sourced exclusively from environment variables (process.env or session.env)\n - All other settings: Centralized in `.taskmasterconfig` JSON file\n\n### `.taskmasterconfig` Structure\n```json\n{\n \"models\": {\n \"completion\": \"gpt-3.5-turbo\",\n \"chat\": \"gpt-4\",\n \"embedding\": \"text-embedding-ada-002\"\n },\n \"parameters\": {\n \"temperature\": 0.7,\n \"maxTokens\": 2000,\n \"topP\": 1\n },\n \"logging\": {\n \"enabled\": true,\n \"level\": \"info\"\n },\n \"defaults\": {\n \"outputFormat\": \"markdown\"\n }\n}\n```\n\n### Configuration Access Patterns\n- Document the getter functions in `config-manager.js`:\n - `getModelForRole(role)`: Returns configured model for a specific role\n - `getParameter(name)`: Retrieves model parameters\n - `getLoggingConfig()`: Access logging settings\n - Example usage: `const completionModel = getModelForRole('completion')`\n\n### Environment Variable Resolution\n- Explain the `resolveEnvVariable(key)` function:\n - Checks both process.env and session.env\n - Prioritizes session variables over process variables\n - Returns null if variable not found\n\n### Configuration Precedence\n- Document the order of precedence:\n 1. Command-line arguments (highest priority)\n 2. Session environment variables\n 3. 
Process environment variables\n 4. `.taskmasterconfig` settings\n 5. Hardcoded defaults (lowest priority)\n\n### Migration Guide\n- Steps for users to migrate from previous configuration approach\n- How to verify configuration is correctly loaded\n", "status": "pending", "parentTaskId": 61 + }, + { + "id": 11, + "title": "Refactor PRD Parsing to use generateObjectService", + "description": "Update PRD processing logic (callClaude, processClaudeResponse, handleStreamingRequest in ai-services.js) to use the new `generateObjectService` from `ai-services-unified.js` with an appropriate Zod schema.", + "details": "\n\n\nThe PRD parsing refactoring should align with the new configuration system architecture. When implementing this change:\n\n1. Replace direct environment variable access with `resolveEnvVariable` calls for API keys.\n\n2. Remove any hardcoded model names or parameters in the PRD processing functions. Instead, use the config-manager.js getters:\n - `getModelForRole('prd')` to determine the appropriate model\n - `getModelParameters('prd')` to retrieve temperature, maxTokens, etc.\n\n3. When constructing the generateObjectService call, ensure parameters are sourced from config:\n```javascript\nconst modelConfig = getModelParameters('prd');\nconst model = getModelForRole('prd');\n\nconst result = await generateObjectService({\n model,\n temperature: modelConfig.temperature,\n maxTokens: modelConfig.maxTokens,\n // other parameters as needed\n schema: prdSchema,\n // existing prompt/context parameters\n});\n```\n\n4. Update any logging to respect the logging configuration from config-manager (e.g., `isLoggingEnabled('ai')`)\n\n5. Ensure any default values previously hardcoded are now retrieved from the configuration system.\n", + "status": "pending", + "dependencies": [ + "61.23" + ], + "parentTaskId": 61 + }, + { + "id": 12, + "title": "Refactor Basic Subtask Generation to use generateObjectService", + "description": "Update the `generateSubtasks` function in `ai-services.js` to use the new `generateObjectService` from `ai-services-unified.js` with a Zod schema for the subtask array.", + "details": "\n\n\nThe refactoring should leverage the new configuration system:\n\n1. Replace direct model references with calls to config-manager.js getters:\n ```javascript\n const { getModelForRole, getModelParams } = require('./config-manager');\n \n // Instead of hardcoded models/parameters:\n const model = getModelForRole('subtask-generator');\n const modelParams = getModelParams('subtask-generator');\n ```\n\n2. Update API key handling to use the resolveEnvVariable pattern:\n ```javascript\n const { resolveEnvVariable } = require('./utils');\n const apiKey = resolveEnvVariable('OPENAI_API_KEY');\n ```\n\n3. When calling generateObjectService, pass the configuration parameters:\n ```javascript\n const result = await generateObjectService({\n schema: subtasksArraySchema,\n prompt: subtaskPrompt,\n model: model,\n temperature: modelParams.temperature,\n maxTokens: modelParams.maxTokens,\n // Other parameters from config\n });\n ```\n\n4. 
Add error handling that respects logging configuration:\n ```javascript\n const { isLoggingEnabled } = require('./config-manager');\n \n try {\n // Generation code\n } catch (error) {\n if (isLoggingEnabled('errors')) {\n console.error('Subtask generation error:', error);\n }\n throw error;\n }\n ```\n", + "status": "pending", + "dependencies": [ + "61.23" + ], + "parentTaskId": 61 + }, + { + "id": 13, + "title": "Refactor Research Subtask Generation to use generateObjectService", + "description": "Update the `generateSubtasksWithPerplexity` function in `ai-services.js` to first perform research (potentially keeping the Perplexity call separate or adapting it) and then use `generateObjectService` from `ai-services-unified.js` with research results included in the prompt.", + "details": "\n\n\nThe refactoring should align with the new configuration system by:\n\n1. Replace direct environment variable access with `resolveEnvVariable` for API keys\n2. Use the config-manager.js getters to retrieve model parameters:\n - Replace hardcoded model names with `getModelForRole('research')`\n - Use `getParametersForRole('research')` to get temperature, maxTokens, etc.\n3. Implement proper error handling that respects the `getLoggingConfig()` settings\n4. Example implementation pattern:\n```javascript\nconst { getModelForRole, getParametersForRole, getLoggingConfig } = require('./config-manager');\nconst { resolveEnvVariable } = require('./environment-utils');\n\n// In the refactored function:\nconst researchModel = getModelForRole('research');\nconst { temperature, maxTokens } = getParametersForRole('research');\nconst apiKey = resolveEnvVariable('PERPLEXITY_API_KEY');\nconst { verbose } = getLoggingConfig();\n\n// Then use these variables in the API call configuration\n```\n5. Ensure the transition to generateObjectService maintains all existing functionality while leveraging the new configuration system\n", + "status": "pending", + "dependencies": [ + "61.23" + ], + "parentTaskId": 61 + }, + { + "id": 14, + "title": "Refactor Research Task Description Generation to use generateObjectService", + "description": "Update the `generateTaskDescriptionWithPerplexity` function in `ai-services.js` to first perform research and then use `generateObjectService` from `ai-services-unified.js` to generate the structured task description.", + "details": "\n\n\nThe refactoring should incorporate the new configuration management system:\n\n1. Update imports to include the config-manager:\n```javascript\nconst { getModelForRole, getParametersForRole } = require('./config-manager');\n```\n\n2. Replace any hardcoded model selections or parameters with config-manager calls:\n```javascript\n// Replace direct model references like:\n// const model = \"perplexity-model-7b-online\" \n// With:\nconst model = getModelForRole('research');\nconst parameters = getParametersForRole('research');\n```\n\n3. For API key handling, use the resolveEnvVariable pattern:\n```javascript\nconst apiKey = resolveEnvVariable('PERPLEXITY_API_KEY');\n```\n\n4. When calling generateObjectService, pass the configuration-derived parameters:\n```javascript\nreturn generateObjectService({\n prompt: researchResults,\n schema: taskDescriptionSchema,\n role: 'taskDescription',\n // Config-driven parameters will be applied within generateObjectService\n});\n```\n\n5. 
Remove any hardcoded configuration values, ensuring all settings are retrieved from the centralized configuration system.\n", + "status": "pending", + "dependencies": [ + "61.23" + ], + "parentTaskId": 61 + }, + { + "id": 15, + "title": "Refactor Complexity Analysis AI Call to use generateObjectService", + "description": "Update the logic that calls the AI after using `generateComplexityAnalysisPrompt` in `ai-services.js` to use the new `generateObjectService` from `ai-services-unified.js` with a Zod schema for the complexity report.", + "details": "\n\n\nThe complexity analysis AI call should be updated to align with the new configuration system architecture. When refactoring to use `generateObjectService`, implement the following changes:\n\n1. Replace direct model references with calls to the appropriate config getter:\n ```javascript\n const modelName = getComplexityAnalysisModel(); // Use the specific getter from config-manager.js\n ```\n\n2. Retrieve AI parameters from the config system:\n ```javascript\n const temperature = getAITemperature('complexityAnalysis');\n const maxTokens = getAIMaxTokens('complexityAnalysis');\n ```\n\n3. When constructing the call to `generateObjectService`, pass these configuration values:\n ```javascript\n const result = await generateObjectService({\n prompt,\n schema: complexityReportSchema,\n modelName,\n temperature,\n maxTokens,\n sessionEnv: session?.env\n });\n ```\n\n4. Ensure API key resolution uses the `resolveEnvVariable` helper:\n ```javascript\n // Don't hardcode API keys or directly access process.env\n // The generateObjectService should handle this internally with resolveEnvVariable\n ```\n\n5. Add logging configuration based on settings:\n ```javascript\n const enableLogging = getAILoggingEnabled('complexityAnalysis');\n if (enableLogging) {\n // Use the logging mechanism defined in the configuration\n }\n ```\n", + "status": "pending", + "dependencies": [ + "61.23" + ], + "parentTaskId": 61 + }, + { + "id": 16, + "title": "Refactor Task Addition AI Call to use generateObjectService", + "description": "Update the logic that calls the AI after using `_buildAddTaskPrompt` in `ai-services.js` to use the new `generateObjectService` from `ai-services-unified.js` with a Zod schema for the single task object.", + "details": "\n\n\nTo implement this refactoring, you'll need to:\n\n1. Replace direct AI calls with the new `generateObjectService` approach:\n ```javascript\n // OLD approach\n const aiResponse = await callLLM(prompt, modelName, temperature, maxTokens);\n const task = parseAIResponseToTask(aiResponse);\n \n // NEW approach using generateObjectService with config-manager\n import { generateObjectService } from '../services/ai-services-unified.js';\n import { getAIModelForRole, getAITemperature, getAIMaxTokens } from '../config/config-manager.js';\n import { taskSchema } from '../schemas/task-schema.js'; // Create this Zod schema for a single task\n \n const modelName = getAIModelForRole('taskCreation');\n const temperature = getAITemperature('taskCreation');\n const maxTokens = getAIMaxTokens('taskCreation');\n \n const task = await generateObjectService({\n prompt: _buildAddTaskPrompt(...),\n schema: taskSchema,\n modelName,\n temperature,\n maxTokens\n });\n ```\n\n2. Create a Zod schema for the task object in a new file `schemas/task-schema.js` that defines the expected structure.\n\n3. 
Ensure API key resolution uses the new pattern:\n ```javascript\n // This happens inside generateObjectService, but verify it uses:\n import { resolveEnvVariable } from '../config/config-manager.js';\n // Instead of direct process.env access\n ```\n\n4. Update any error handling to match the new service's error patterns.\n", + "status": "pending", + "dependencies": [ + "61.23" + ], + "parentTaskId": 61 + }, + { + "id": 17, + "title": "Refactor General Chat/Update AI Calls", + "description": "Refactor functions like `sendChatWithContext` (and potentially related task update functions in `task-manager.js` if they make direct AI calls) to use `streamTextService` or `generateTextService` from `ai-services-unified.js`.", + "details": "\n\n\nWhen refactoring `sendChatWithContext` and related functions, ensure they align with the new configuration system:\n\n1. Replace direct model references with config getter calls:\n ```javascript\n // Before\n const model = \"gpt-4\";\n \n // After\n import { getModelForRole } from './config-manager.js';\n const model = getModelForRole('chat'); // or appropriate role\n ```\n\n2. Extract AI parameters from config rather than hardcoding:\n ```javascript\n import { getAIParameters } from './config-manager.js';\n const { temperature, maxTokens } = getAIParameters('chat');\n ```\n\n3. When calling `streamTextService` or `generateTextService`, pass parameters from config:\n ```javascript\n await streamTextService({\n messages,\n model: getModelForRole('chat'),\n temperature: getAIParameters('chat').temperature,\n // other parameters as needed\n });\n ```\n\n4. For logging control, check config settings:\n ```javascript\n import { isLoggingEnabled } from './config-manager.js';\n \n if (isLoggingEnabled('aiCalls')) {\n console.log('AI request:', messages);\n }\n ```\n\n5. Ensure any default behaviors respect configuration defaults rather than hardcoded values.\n", + "status": "pending", + "dependencies": [ + "61.23" + ], + "parentTaskId": 61 + }, + { + "id": 18, + "title": "Refactor Callers of AI Parsing Utilities", + "description": "Update the code that calls `parseSubtasksFromText`, `parseTaskJsonResponse`, and `parseTasksFromCompletion` to instead directly handle the structured JSON output provided by `generateObjectService` (as the refactored AI calls will now use it).", + "details": "\n\n\nThe refactoring of callers to AI parsing utilities should align with the new configuration system. When updating these callers:\n\n1. Replace direct API key references with calls to the configuration system using `resolveEnvVariable` for sensitive credentials.\n\n2. Update model selection logic to use the centralized configuration from `.taskmasterconfig` via the getter functions in `config-manager.js`. For example:\n ```javascript\n // Old approach\n const model = \"gpt-4\";\n \n // New approach\n import { getModelForRole } from './config-manager';\n const model = getModelForRole('parsing'); // or appropriate role\n ```\n\n3. Similarly, replace hardcoded parameters with configuration-based values:\n ```javascript\n // Old approach\n const maxTokens = 2000;\n const temperature = 0.2;\n \n // New approach\n import { getAIParameterValue } from './config-manager';\n const maxTokens = getAIParameterValue('maxTokens', 'parsing');\n const temperature = getAIParameterValue('temperature', 'parsing');\n ```\n\n4. Ensure logging behavior respects the centralized logging configuration settings.\n\n5. 
When calling `generateObjectService`, pass the appropriate configuration context to ensure it uses the correct settings from the centralized configuration system.\n", + "status": "pending", + "dependencies": [ + "61.11,61.12,61.13,61.14,61.15,61.16,61.17,61.19" + ], + "parentTaskId": 61 + }, + { + "id": 19, + "title": "Refactor `updateSubtaskById` AI Call", + "description": "Refactor the AI call within `updateSubtaskById` in `task-manager.js` (which generates additional information based on a prompt) to use the appropriate unified service function (e.g., `generateTextService`) from `ai-services-unified.js`.", + "details": "\n\n\nThe `updateSubtaskById` function currently makes direct AI calls with hardcoded parameters. When refactoring to use the unified service:\n\n1. Replace direct OpenAI calls with `generateTextService` from `ai-services-unified.js`\n2. Use configuration parameters from `config-manager.js`:\n - Replace hardcoded model with `getMainModel()`\n - Use `getMainMaxTokens()` for token limits\n - Apply `getMainTemperature()` for response randomness\n3. Ensure prompt construction remains consistent but passes these dynamic parameters\n4. Handle API key resolution through the unified service (which uses `resolveEnvVariable`)\n5. Update error handling to work with the unified service response format\n6. If the function uses any logging, ensure it respects `getLoggingEnabled()` setting\n\nExample refactoring pattern:\n```javascript\n// Before\nconst completion = await openai.chat.completions.create({\n model: \"gpt-4\",\n temperature: 0.7,\n max_tokens: 1000,\n messages: [/* prompt messages */]\n});\n\n// After\nconst completion = await generateTextService({\n model: getMainModel(),\n temperature: getMainTemperature(),\n max_tokens: getMainMaxTokens(),\n messages: [/* prompt messages */]\n});\n```\n", + "status": "pending", + "dependencies": [ + "61.23" + ], + "parentTaskId": 61 + }, + { + "id": 20, + "title": "Implement `anthropic.js` Provider Module using Vercel AI SDK", + "description": "Create and implement the `anthropic.js` module within `src/ai-providers/`. This module should contain functions to interact with the Anthropic API (streaming and non-streaming) using the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`.", + "details": "", + "status": "done", + "dependencies": [], + "parentTaskId": 61 + }, + { + "id": 21, + "title": "Implement `perplexity.js` Provider Module using Vercel AI SDK", + "description": "Create and implement the `perplexity.js` module within `src/ai-providers/`. This module should contain functions to interact with the Perplexity API (likely using their OpenAI-compatible endpoint) via the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`.", + "details": "", + "status": "done", + "dependencies": [], + "parentTaskId": 61 + }, + { + "id": 22, + "title": "Implement `openai.js` Provider Module using Vercel AI SDK", + "description": "Create and implement the `openai.js` module within `src/ai-providers/`. This module should contain functions to interact with the OpenAI API (streaming and non-streaming) using the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`. 
(Optional, implement if OpenAI models are needed).", + "details": "", + "status": "pending", + "dependencies": [], + "parentTaskId": 61 + }, + { + "id": 23, + "title": "Implement Conditional Provider Logic in `ai-services-unified.js`", + "description": "Implement logic within the functions of `ai-services-unified.js` (e.g., `generateTextService`, `generateObjectService`, `streamChatService`) to dynamically select and call the appropriate provider module (`anthropic.js`, `perplexity.js`, etc.) based on configuration (e.g., environment variables like `AI_PROVIDER` and `AI_MODEL` from `process.env` or `session.env`).", + "details": "\n\n\nThe unified service should now use the configuration manager for provider selection rather than directly accessing environment variables. Here's the implementation approach:\n\n1. Import the config-manager functions:\n```javascript\nconst { \n getMainProvider, \n getResearchProvider, \n getFallbackProvider,\n getModelForRole,\n getProviderParameters\n} = require('./config-manager');\n```\n\n2. Implement provider selection based on context/role:\n```javascript\nfunction selectProvider(role = 'default', context = {}) {\n // Try to get provider based on role or context\n let provider;\n \n if (role === 'research') {\n provider = getResearchProvider();\n } else if (context.fallback) {\n provider = getFallbackProvider();\n } else {\n provider = getMainProvider();\n }\n \n // Dynamically import the provider module\n return require(`./${provider}.js`);\n}\n```\n\n3. Update service functions to use this selection logic:\n```javascript\nasync function generateTextService(prompt, options = {}) {\n const { role = 'default', ...otherOptions } = options;\n const provider = selectProvider(role, options);\n const model = getModelForRole(role);\n const parameters = getProviderParameters(provider.name);\n \n return provider.generateText(prompt, { \n model, \n ...parameters,\n ...otherOptions \n });\n}\n```\n\n4. Implement fallback logic for service resilience:\n```javascript\nasync function executeWithFallback(serviceFunction, ...args) {\n try {\n return await serviceFunction(...args);\n } catch (error) {\n console.error(`Primary provider failed: ${error.message}`);\n const fallbackProvider = require(`./${getFallbackProvider()}.js`);\n return fallbackProvider[serviceFunction.name](...args);\n }\n}\n```\n\n5. Add provider capability checking to prevent calling unsupported features:\n```javascript\nfunction checkProviderCapability(provider, capability) {\n const capabilities = {\n 'anthropic': ['text', 'chat', 'stream'],\n 'perplexity': ['text', 'chat', 'stream', 'research'],\n 'openai': ['text', 'chat', 'stream', 'embedding', 'vision']\n // Add other providers as needed\n };\n \n return capabilities[provider]?.includes(capability) || false;\n}\n```\n", + "status": "pending", + "dependencies": [ + "61.20,61.21,61.22,61.24,61.25,61.26,61.27,61.28,61.29,61.30,61.34" + ], + "parentTaskId": 61 + }, + { + "id": 24, + "title": "Implement `google.js` Provider Module using Vercel AI SDK", + "description": "Create and implement the `google.js` module within `src/ai-providers/`. 
This module should contain functions to interact with Google AI models (e.g., Gemini) using the **Vercel AI SDK (`@ai-sdk/google`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.", + "details": "", + "status": "pending", + "dependencies": [], + "parentTaskId": 61 + }, + { + "id": 25, + "title": "Implement `ollama.js` Provider Module", + "description": "Create and implement the `ollama.js` module within `src/ai-providers/`. This module should contain functions to interact with local Ollama models using the **`ollama-ai-provider` library**, adhering to the standardized input/output format defined for `ai-services-unified.js`. Note the specific library used.", + "details": "", + "status": "pending", + "dependencies": [], + "parentTaskId": 61 + }, + { + "id": 26, + "title": "Implement `mistral.js` Provider Module using Vercel AI SDK", + "description": "Create and implement the `mistral.js` module within `src/ai-providers/`. This module should contain functions to interact with Mistral AI models using the **Vercel AI SDK (`@ai-sdk/mistral`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.", + "details": "", + "status": "pending", + "dependencies": [], + "parentTaskId": 61 + }, + { + "id": 27, + "title": "Implement `azure.js` Provider Module using Vercel AI SDK", + "description": "Create and implement the `azure.js` module within `src/ai-providers/`. This module should contain functions to interact with Azure OpenAI models using the **Vercel AI SDK (`@ai-sdk/azure`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.", + "details": "", + "status": "pending", + "dependencies": [], + "parentTaskId": 61 + }, + { + "id": 28, + "title": "Implement `openrouter.js` Provider Module", + "description": "Create and implement the `openrouter.js` module within `src/ai-providers/`. This module should contain functions to interact with various models via OpenRouter using the **`@openrouter/ai-sdk-provider` library**, adhering to the standardized input/output format defined for `ai-services-unified.js`. Note the specific library used.", + "details": "", + "status": "pending", + "dependencies": [], + "parentTaskId": 61 + }, + { + "id": 29, + "title": "Implement `xai.js` Provider Module using Vercel AI SDK", + "description": "Create and implement the `xai.js` module within `src/ai-providers/`. This module should contain functions to interact with xAI models (e.g., Grok) using the **Vercel AI SDK (`@ai-sdk/xai`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.", + "details": "", + "status": "pending", + "dependencies": [], + "parentTaskId": 61 + }, + { + "id": 30, + "title": "Update Configuration Management for AI Providers", + "description": "Update `config-manager.js` and related configuration logic/documentation to support the new provider/model selection mechanism for `ai-services-unified.js` (e.g., using `AI_PROVIDER`, `AI_MODEL` env vars from `process.env` or `session.env`), ensuring compatibility with existing role-based selection if needed.", + "details": "\n\n\n```javascript\n// Implementation details for config-manager.js updates\n\n/**\n * Unified configuration resolution function that checks multiple sources in priority order:\n * 1. process.env\n * 2. session.env (if available)\n * 3. 
Default values from .taskmasterconfig\n * \n * @param {string} key - Configuration key to resolve\n * @param {object} session - Optional session object that may contain env values\n * @param {*} defaultValue - Default value if not found in any source\n * @returns {*} Resolved configuration value\n */\nfunction resolveConfig(key, session = null, defaultValue = null) {\n return process.env[key] ?? session?.env?.[key] ?? defaultValue;\n}\n\n// AI provider/model resolution with fallback to role-based selection\nfunction resolveAIConfig(session = null, role = 'default') {\n const provider = resolveConfig('AI_PROVIDER', session);\n const model = resolveConfig('AI_MODEL', session);\n \n // If explicit provider/model specified, use those\n if (provider && model) {\n return { provider, model };\n }\n \n // Otherwise fall back to role-based configuration\n const roleConfig = getRoleBasedAIConfig(role);\n return {\n provider: provider || roleConfig.provider,\n model: model || roleConfig.model\n };\n}\n\n// Example usage in ai-services-unified.js:\n// const { provider, model } = resolveAIConfig(session, role);\n// const client = getProviderClient(provider, resolveConfig(`${provider.toUpperCase()}_API_KEY`, session));\n\n/**\n * Configuration Resolution Documentation:\n * \n * 1. Environment Variables:\n * - AI_PROVIDER: Explicitly sets the AI provider (e.g., 'openai', 'anthropic')\n * - AI_MODEL: Explicitly sets the model to use (e.g., 'gpt-4', 'claude-2')\n * - OPENAI_API_KEY, ANTHROPIC_API_KEY, etc.: Provider-specific API keys\n * \n * 2. Resolution Strategy:\n * - Values are first checked in process.env\n * - If not found, session.env is checked (when available)\n * - If still not found, defaults from .taskmasterconfig are used\n * - For AI provider/model, explicit settings override role-based configuration\n * \n * 3. Backward Compatibility:\n * - Role-based selection continues to work when AI_PROVIDER/AI_MODEL are not set\n * - Existing code using getRoleBasedAIConfig() will continue to function\n */\n```\n\n\n\n\n```javascript\n/**\n * Refactored configuration management implementation\n */\n\n// Core configuration getters - replace direct CONFIG access\nconst getMainProvider = () => resolveConfig('AI_PROVIDER', null, CONFIG.ai?.mainProvider || 'openai');\nconst getMainModel = () => resolveConfig('AI_MODEL', null, CONFIG.ai?.mainModel || 'gpt-4');\nconst getLogLevel = () => resolveConfig('LOG_LEVEL', null, CONFIG.logging?.level || 'info');\nconst getMaxTokens = (role = 'default') => {\n const explicitMaxTokens = parseInt(resolveConfig('MAX_TOKENS', null, 0), 10);\n if (explicitMaxTokens > 0) return explicitMaxTokens;\n \n // Fall back to role-based configuration\n return CONFIG.ai?.roles?.[role]?.maxTokens || CONFIG.ai?.defaultMaxTokens || 4096;\n};\n\n// API key resolution - separate from general configuration\nfunction resolveEnvVariable(key, session = null) {\n return process.env[key] ?? session?.env?.[key] ?? null;\n}\n\nfunction isApiKeySet(provider, session = null) {\n const keyName = `${provider.toUpperCase()}_API_KEY`;\n return Boolean(resolveEnvVariable(keyName, session));\n}\n\n/**\n * Migration guide for application components:\n * \n * 1. Replace direct CONFIG access:\n * - Before: `const provider = CONFIG.ai.mainProvider;`\n * - After: `const provider = getMainProvider();`\n * \n * 2. Replace direct process.env access for API keys:\n * - Before: `const apiKey = process.env.OPENAI_API_KEY;`\n * - After: `const apiKey = resolveEnvVariable('OPENAI_API_KEY', session);`\n * \n * 3. 
Check API key availability:\n * - Before: `if (process.env.OPENAI_API_KEY) {...}`\n * - After: `if (isApiKeySet('openai', session)) {...}`\n * \n * 4. Update provider/model selection in ai-services:\n * - Before: \n * ```\n * const provider = role ? CONFIG.ai.roles[role]?.provider : CONFIG.ai.mainProvider;\n * const model = role ? CONFIG.ai.roles[role]?.model : CONFIG.ai.mainModel;\n * ```\n * - After:\n * ```\n * const { provider, model } = resolveAIConfig(session, role);\n * ```\n */\n\n// Update .taskmasterconfig schema documentation\nconst configSchema = {\n \"ai\": {\n \"mainProvider\": \"Default AI provider (overridden by AI_PROVIDER env var)\",\n \"mainModel\": \"Default AI model (overridden by AI_MODEL env var)\",\n \"defaultMaxTokens\": \"Default max tokens (overridden by MAX_TOKENS env var)\",\n \"roles\": {\n \"role_name\": {\n \"provider\": \"Provider for this role (fallback if AI_PROVIDER not set)\",\n \"model\": \"Model for this role (fallback if AI_MODEL not set)\",\n \"maxTokens\": \"Max tokens for this role (fallback if MAX_TOKENS not set)\"\n }\n }\n },\n \"logging\": {\n \"level\": \"Logging level (overridden by LOG_LEVEL env var)\"\n }\n};\n```\n\nImplementation notes:\n1. All configuration getters should provide environment variable override capability first, then fall back to .taskmasterconfig values\n2. API key resolution should be kept separate from general configuration to maintain security boundaries\n3. Update all application components to use these new getters rather than accessing CONFIG or process.env directly\n4. Document the priority order (env vars > session.env > .taskmasterconfig) in JSDoc comments\n5. Ensure backward compatibility by maintaining support for role-based configuration when explicit env vars aren't set\n\n", + "status": "pending", + "dependencies": [], + "parentTaskId": 61 + }, + { + "id": 31, + "title": "Implement Integration Tests for Unified AI Service", + "description": "Implement integration tests for `ai-services-unified.js`. These tests should verify the correct routing to different provider modules based on configuration and ensure the unified service functions (`generateTextService`, `generateObjectService`, etc.) work correctly when called from modules like `task-manager.js`.", + "details": "\n\n\nFor the integration tests of the Unified AI Service, consider the following implementation details:\n\n1. Setup test fixtures:\n - Create a mock `.taskmasterconfig` file with different provider configurations\n - Define test cases with various model selections and parameter settings\n - Use environment variable mocks only for API keys (e.g., `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`)\n\n2. Test configuration resolution:\n - Verify that `ai-services-unified.js` correctly retrieves settings from `config-manager.js`\n - Test that model selection follows the hierarchy defined in `.taskmasterconfig`\n - Ensure fallback mechanisms work when primary providers are unavailable\n\n3. Mock the provider modules:\n ```javascript\n jest.mock('../services/openai-service.js');\n jest.mock('../services/anthropic-service.js');\n ```\n\n4. Test specific scenarios:\n - Provider selection based on configured preferences\n - Parameter inheritance from config (temperature, maxTokens)\n - Error handling when API keys are missing\n - Proper routing when specific models are requested\n\n5. 
Verify integration with task-manager:\n ```javascript\n test('task-manager correctly uses unified AI service with config-based settings', async () => {\n // Setup mock config with specific settings\n mockConfigManager.getAIProviderPreference.mockReturnValue(['openai', 'anthropic']);\n mockConfigManager.getModelForRole.mockReturnValue('gpt-4');\n mockConfigManager.getParametersForModel.mockReturnValue({ temperature: 0.7, maxTokens: 2000 });\n \n // Verify task-manager uses these settings when calling the unified service\n // ...\n });\n ```\n\n6. Include tests for configuration changes at runtime and their effect on service behavior.\n", + "status": "pending", + "dependencies": [ + "61.18" + ], + "parentTaskId": 61 + }, + { + "id": 32, + "title": "Update Documentation for New AI Architecture", + "description": "Update relevant documentation files (e.g., `architecture.mdc`, `taskmaster.mdc`, environment variable guides, README) to accurately reflect the new AI service architecture using `ai-services-unified.js`, provider modules, the Vercel AI SDK, and the updated configuration approach.", + "details": "\n\n\nThe new AI architecture introduces a clear separation between sensitive credentials and configuration settings:\n\n## Environment Variables vs Configuration File\n\n- **Environment Variables (.env)**: \n - Store only sensitive API keys and credentials\n - Accessed via `resolveEnvVariable()` which checks both process.env and session.env\n - Example: `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `GOOGLE_API_KEY`\n - No model names, parameters, or non-sensitive settings should be here\n\n- **.taskmasterconfig File**:\n - Central location for all non-sensitive configuration\n - Structured JSON with clear sections for different aspects of the system\n - Contains:\n - Model mappings by role (e.g., `systemModels`, `userModels`)\n - Default parameters (temperature, maxTokens, etc.)\n - Logging preferences\n - Provider-specific settings\n - Accessed via getter functions from `config-manager.js` like:\n ```javascript\n import { getModelForRole, getDefaultTemperature } from './config-manager.js';\n \n // Usage examples\n const model = getModelForRole('system');\n const temp = getDefaultTemperature();\n ```\n\n## Implementation Notes\n- Document the structure of `.taskmasterconfig` with examples\n- Explain the migration path for users with existing setups\n- Include a troubleshooting section for common configuration issues\n- Add a configuration validation section explaining how the system verifies settings\n", + "status": "pending", + "dependencies": [ + "61.31" + ], + "parentTaskId": 61 + }, + { + "id": 33, + "title": "Cleanup Old AI Service Files", + "description": "After all other migration subtasks (refactoring, provider implementation, testing, documentation) are complete and verified, remove the old `ai-services.js` and `ai-client-factory.js` files from the `scripts/modules/` directory. Ensure no code still references them.", + "details": "", + "status": "pending", + "dependencies": [ + "61.32" + ], + "parentTaskId": 61 + }, + { + "id": 34, + "title": "Audit and Standardize Env Variable Access", + "description": "Audit the entire codebase (core modules, provider modules, utilities) to ensure all accesses to environment variables (API keys, configuration flags) consistently use a standardized resolution function (like `resolveEnvVariable` or a new utility) that checks `process.env` first and then `session.env` if available. 
Refactor any direct `process.env` access where `session.env` should also be considered.", + "details": "\n\n\nThis audit should distinguish between two types of configuration:\n\n1. **Sensitive credentials (API keys)**: These should exclusively use the `resolveEnvVariable` pattern to check both `process.env` and `session.env`. Verify that no API keys are hardcoded or accessed through direct `process.env` references.\n\n2. **Application configuration**: All non-credential settings should be migrated to use the centralized `.taskmasterconfig` system via the `config-manager.js` getters. This includes:\n - Model selections and role assignments\n - Parameter settings (temperature, maxTokens, etc.)\n - Logging configuration\n - Default behaviors and fallbacks\n\nImplementation notes:\n- Create a comprehensive inventory of all environment variable accesses\n- Categorize each as either credential or application configuration\n- For credentials: standardize on `resolveEnvVariable` pattern\n- For app config: migrate to appropriate `config-manager.js` getter methods\n- Document any exceptions that require special handling\n- Add validation to prevent regression (e.g., ESLint rules against direct `process.env` access)\n\nThis separation ensures security best practices for credentials while centralizing application configuration for better maintainability.\n", + "status": "pending", + "dependencies": [], + "parentTaskId": 61 + }, + { + "id": 35, + "title": "Review/Refactor MCP Direct Functions for Explicit Config Root Passing", + "description": "Review all functions in mcp-server/src/core/direct-functions/*.js. Ensure that any calls made from these functions to getters in scripts/modules/config-manager.js (e.g., getMainProvider, getDefaultPriority, getLogLevel, etc.) explicitly pass the projectRoot (obtained from the args object, which is derived from the session context) as the first argument to the getter. This prevents the getters from incorrectly falling back to using findProjectRoot() based on the server's cwd when running in an MCP context. 
This is crucial for loading the correct .taskmasterconfig settings based on the user's project.", + "details": "", + "status": "pending", + "dependencies": [], + "parentTaskId": 61 } ] } diff --git a/tests/unit/ai-services-unified.test.js b/tests/unit/ai-services-unified.test.js new file mode 100644 index 00000000..3d7a4351 --- /dev/null +++ b/tests/unit/ai-services-unified.test.js @@ -0,0 +1,683 @@ +import { jest } from '@jest/globals'; + +// Mock ai-client-factory +const mockGetClient = jest.fn(); +jest.unstable_mockModule('../../scripts/modules/ai-client-factory.js', () => ({ + getClient: mockGetClient +})); + +// Mock AI SDK Core +const mockGenerateText = jest.fn(); +jest.unstable_mockModule('ai', () => ({ + generateText: mockGenerateText + // Mock other AI SDK functions like streamText as needed +})); + +// Mock utils logger +const mockLog = jest.fn(); +jest.unstable_mockModule('../../scripts/modules/utils.js', () => ({ + log: mockLog + // Keep other exports if utils has more, otherwise just log +})); + +// Import the module to test (AFTER mocks) +const { generateTextService } = await import( + '../../scripts/modules/ai-services-unified.js' +); + +describe('Unified AI Services', () => { + beforeEach(() => { + // Clear mocks before each test + mockGetClient.mockClear(); + mockGenerateText.mockClear(); + mockLog.mockClear(); // Clear log mock + }); + + describe('generateTextService', () => { + test('should get client and call generateText with correct parameters', async () => { + const mockClient = { type: 'mock-client' }; + mockGetClient.mockResolvedValue(mockClient); + mockGenerateText.mockResolvedValue({ text: 'Mock response' }); + + const serviceParams = { + role: 'main', + session: { env: { SOME_KEY: 'value' } }, // Example session + overrideOptions: { provider: 'override' }, // Example overrides + prompt: 'Test prompt', + // Other generateText options like maxTokens, temperature etc. 
+ maxTokens: 100 + }; + + const result = await generateTextService(serviceParams); + + // Verify getClient call + expect(mockGetClient).toHaveBeenCalledTimes(1); + expect(mockGetClient).toHaveBeenCalledWith( + serviceParams.role, + serviceParams.session, + serviceParams.overrideOptions + ); + + // Verify generateText call + expect(mockGenerateText).toHaveBeenCalledTimes(1); + expect(mockGenerateText).toHaveBeenCalledWith({ + model: mockClient, // Ensure the correct client is passed + prompt: serviceParams.prompt, + maxTokens: serviceParams.maxTokens + // Add other expected generateText options here + }); + + // Verify result + expect(result).toEqual({ text: 'Mock response' }); + }); + + test('should retry generateText on specific errors and succeed', async () => { + const mockClient = { type: 'mock-client' }; + mockGetClient.mockResolvedValue(mockClient); + + // Simulate failure then success + mockGenerateText + .mockRejectedValueOnce(new Error('Rate limit exceeded')) // Retryable error + .mockRejectedValueOnce(new Error('Service temporarily unavailable')) // Retryable error + .mockResolvedValue({ text: 'Success after retries' }); + + const serviceParams = { role: 'main', prompt: 'Retry test' }; + + // Use jest.advanceTimersByTime for delays if implemented + // jest.useFakeTimers(); + + const result = await generateTextService(serviceParams); + + expect(mockGetClient).toHaveBeenCalledTimes(1); // Client fetched once + expect(mockGenerateText).toHaveBeenCalledTimes(3); // Initial call + 2 retries + expect(result).toEqual({ text: 'Success after retries' }); + + // jest.useRealTimers(); // Restore real timers if faked + }); + + test('should fail after exhausting retries', async () => { + jest.setTimeout(15000); // Increase timeout further + const mockClient = { type: 'mock-client' }; + mockGetClient.mockResolvedValue(mockClient); + + // Simulate persistent failure + mockGenerateText.mockRejectedValue(new Error('Rate limit exceeded')); + + const serviceParams = { role: 'main', prompt: 'Retry failure test' }; + + await expect(generateTextService(serviceParams)).rejects.toThrow( + 'Rate limit exceeded' + ); + + // Sequence is main -> fallback -> research. It tries all client gets even if main fails. + expect(mockGetClient).toHaveBeenCalledTimes(3); + expect(mockGenerateText).toHaveBeenCalledTimes(3); // Initial call + max retries (assuming 2 retries) + }); + + test('should not retry on non-retryable errors', async () => { + const mockMainClient = { type: 'mock-main' }; + const mockFallbackClient = { type: 'mock-fallback' }; + const mockResearchClient = { type: 'mock-research' }; + + // Simulate a non-retryable error + const nonRetryableError = new Error('Invalid request parameters'); + mockGenerateText.mockRejectedValueOnce(nonRetryableError); // Fail only once + + const serviceParams = { role: 'main', prompt: 'No retry test' }; + + // Sequence is main -> fallback -> research. Even if main fails non-retryably, + // it will still try to get clients for fallback and research before throwing. + // Let's assume getClient succeeds for all three. 
+ mockGetClient + .mockResolvedValueOnce(mockMainClient) + .mockResolvedValueOnce(mockFallbackClient) + .mockResolvedValueOnce(mockResearchClient); + + await expect(generateTextService(serviceParams)).rejects.toThrow( + 'Invalid request parameters' + ); + expect(mockGetClient).toHaveBeenCalledTimes(3); // Tries main, fallback, research + expect(mockGenerateText).toHaveBeenCalledTimes(1); // Called only once for main + }); + + test('should log service entry, client info, attempts, and success', async () => { + const mockClient = { + type: 'mock-client', + provider: 'test-provider', + model: 'test-model' + }; // Add mock details + mockGetClient.mockResolvedValue(mockClient); + mockGenerateText.mockResolvedValue({ text: 'Success' }); + + const serviceParams = { role: 'main', prompt: 'Log test' }; + await generateTextService(serviceParams); + + // Check logs (in order) + expect(mockLog).toHaveBeenNthCalledWith( + 1, + 'info', + 'generateTextService called', + { role: 'main' } + ); + expect(mockLog).toHaveBeenNthCalledWith( + 2, + 'info', + 'Attempting service call with role: main' + ); + expect(mockLog).toHaveBeenNthCalledWith( + 3, + 'info', + 'Retrieved AI client', + { + provider: mockClient.provider, + model: mockClient.model + } + ); + expect(mockLog).toHaveBeenNthCalledWith( + 4, + expect.stringMatching( + /Attempt 1\/3 calling generateText for role main/i + ) + ); + expect(mockLog).toHaveBeenNthCalledWith( + 5, + 'info', + 'generateText succeeded for role main on attempt 1' // Original success log from helper + ); + expect(mockLog).toHaveBeenNthCalledWith( + 6, + 'info', + 'generateTextService succeeded using role: main' // Final success log from service + ); + + // Ensure no failure/retry logs were called + expect(mockLog).not.toHaveBeenCalledWith( + 'warn', + expect.stringContaining('failed') + ); + expect(mockLog).not.toHaveBeenCalledWith( + 'info', + expect.stringContaining('Retrying') + ); + }); + + test('should log retry attempts and eventual failure', async () => { + jest.setTimeout(15000); // Increase timeout further + const mockClient = { + type: 'mock-client', + provider: 'test-provider', + model: 'test-model' + }; + const mockFallbackClient = { type: 'mock-fallback' }; + const mockResearchClient = { type: 'mock-research' }; + mockGetClient + .mockResolvedValueOnce(mockClient) + .mockResolvedValueOnce(mockFallbackClient) + .mockResolvedValueOnce(mockResearchClient); + mockGenerateText.mockRejectedValue(new Error('Rate limit')); + + const serviceParams = { role: 'main', prompt: 'Log retry failure' }; + await expect(generateTextService(serviceParams)).rejects.toThrow( + 'Rate limit' + ); + + // Check logs + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'generateTextService called', + { role: 'main' } + ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'Attempting service call with role: main' + ); + expect(mockLog).toHaveBeenCalledWith('info', 'Retrieved AI client', { + provider: mockClient.provider, + model: mockClient.model + }); + expect(mockLog).toHaveBeenCalledWith( + expect.stringMatching( + /Attempt 1\/3 calling generateText for role main/i + ) + ); + expect(mockLog).toHaveBeenCalledWith( + 'warn', + 'Attempt 1 failed for role main: Rate limit' + ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'Retryable error detected. Retrying in 1s...' 
+ ); + expect(mockLog).toHaveBeenCalledWith( + expect.stringMatching( + /Attempt 2\/3 calling generateText for role main/i + ) + ); + expect(mockLog).toHaveBeenCalledWith( + 'warn', + 'Attempt 2 failed for role main: Rate limit' + ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'Retryable error detected. Retrying in 2s...' + ); + expect(mockLog).toHaveBeenCalledWith( + expect.stringMatching( + /Attempt 3\/3 calling generateText for role main/i + ) + ); + expect(mockLog).toHaveBeenCalledWith( + 'warn', + 'Attempt 3 failed for role main: Rate limit' + ); + expect(mockLog).toHaveBeenCalledWith( + 'error', + 'Non-retryable error or max retries reached for role main (generateText).' + ); + // Check subsequent fallback attempts (which also fail) + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'Attempting service call with role: fallback' + ); + expect(mockLog).toHaveBeenCalledWith( + 'error', + 'Service call failed for role fallback: Rate limit' + ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'Attempting service call with role: research' + ); + expect(mockLog).toHaveBeenCalledWith( + 'error', + 'Service call failed for role research: Rate limit' + ); + expect(mockLog).toHaveBeenCalledWith( + 'error', + 'All roles in the sequence [main,fallback,research] failed.' + ); + }); + + test('should use fallback client after primary fails, then succeed', async () => { + const mockMainClient = { type: 'mock-client', provider: 'main-provider' }; + const mockFallbackClient = { + type: 'mock-client', + provider: 'fallback-provider' + }; + + // Setup calls: main client fails, fallback succeeds + mockGetClient + .mockResolvedValueOnce(mockMainClient) // First call for 'main' role + .mockResolvedValueOnce(mockFallbackClient); // Second call for 'fallback' role + mockGenerateText + .mockRejectedValueOnce(new Error('Main Rate limit')) // Main attempt 1 fail + .mockRejectedValueOnce(new Error('Main Rate limit')) // Main attempt 2 fail + .mockRejectedValueOnce(new Error('Main Rate limit')) // Main attempt 3 fail + .mockResolvedValue({ text: 'Fallback success' }); // Fallback attempt 1 success + + const serviceParams = { role: 'main', prompt: 'Fallback test' }; + const result = await generateTextService(serviceParams); + + // Check calls + expect(mockGetClient).toHaveBeenCalledTimes(2); + expect(mockGetClient).toHaveBeenNthCalledWith( + 1, + 'main', + undefined, + undefined + ); + expect(mockGetClient).toHaveBeenNthCalledWith( + 2, + 'fallback', + undefined, + undefined + ); + expect(mockGenerateText).toHaveBeenCalledTimes(4); // 3 main fails, 1 fallback success + expect(mockGenerateText).toHaveBeenNthCalledWith(4, { + model: mockFallbackClient, + prompt: 'Fallback test' + }); + expect(result).toEqual({ text: 'Fallback success' }); + + // Check logs for fallback attempt + expect(mockLog).toHaveBeenCalledWith( + 'error', + 'Service call failed for role main: Main Rate limit' + ); + expect(mockLog).toHaveBeenCalledWith( + 'warn', + 'Retries exhausted or non-retryable error for role main, trying next role in sequence...' 
+ ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'Attempting service call with role: fallback' + ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'generateTextService succeeded using role: fallback' + ); + }); + + test('should use research client after primary and fallback fail, then succeed', async () => { + const mockMainClient = { type: 'mock-client', provider: 'main-provider' }; + const mockFallbackClient = { + type: 'mock-client', + provider: 'fallback-provider' + }; + const mockResearchClient = { + type: 'mock-client', + provider: 'research-provider' + }; + + // Setup calls: main fails, fallback fails, research succeeds + mockGetClient + .mockResolvedValueOnce(mockMainClient) + .mockResolvedValueOnce(mockFallbackClient) + .mockResolvedValueOnce(mockResearchClient); + mockGenerateText + .mockRejectedValueOnce(new Error('Main fail 1')) // Main 1 + .mockRejectedValueOnce(new Error('Main fail 2')) // Main 2 + .mockRejectedValueOnce(new Error('Main fail 3')) // Main 3 + .mockRejectedValueOnce(new Error('Fallback fail 1')) // Fallback 1 + .mockRejectedValueOnce(new Error('Fallback fail 2')) // Fallback 2 + .mockRejectedValueOnce(new Error('Fallback fail 3')) // Fallback 3 + .mockResolvedValue({ text: 'Research success' }); // Research 1 success + + const serviceParams = { role: 'main', prompt: 'Research fallback test' }; + const result = await generateTextService(serviceParams); + + // Check calls + expect(mockGetClient).toHaveBeenCalledTimes(3); + expect(mockGetClient).toHaveBeenNthCalledWith( + 1, + 'main', + undefined, + undefined + ); + expect(mockGetClient).toHaveBeenNthCalledWith( + 2, + 'fallback', + undefined, + undefined + ); + expect(mockGetClient).toHaveBeenNthCalledWith( + 3, + 'research', + undefined, + undefined + ); + expect(mockGenerateText).toHaveBeenCalledTimes(7); // 3 main, 3 fallback, 1 research + expect(mockGenerateText).toHaveBeenNthCalledWith(7, { + model: mockResearchClient, + prompt: 'Research fallback test' + }); + expect(result).toEqual({ text: 'Research success' }); + + // Check logs for fallback attempt + expect(mockLog).toHaveBeenCalledWith( + 'error', + 'Service call failed for role main: Main fail 3' // Error from last attempt for role + ); + expect(mockLog).toHaveBeenCalledWith( + 'warn', + 'Retries exhausted or non-retryable error for role main, trying next role in sequence...' + ); + expect(mockLog).toHaveBeenCalledWith( + 'error', + 'Service call failed for role fallback: Fallback fail 3' // Error from last attempt for role + ); + expect(mockLog).toHaveBeenCalledWith( + 'warn', + 'Retries exhausted or non-retryable error for role fallback, trying next role in sequence...' 
+ ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'Attempting service call with role: research' + ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'generateTextService succeeded using role: research' + ); + }); + + test('should fail if primary, fallback, and research clients all fail', async () => { + const mockMainClient = { type: 'mock-client', provider: 'main' }; + const mockFallbackClient = { type: 'mock-client', provider: 'fallback' }; + const mockResearchClient = { type: 'mock-client', provider: 'research' }; + + // Setup calls: all fail + mockGetClient + .mockResolvedValueOnce(mockMainClient) + .mockResolvedValueOnce(mockFallbackClient) + .mockResolvedValueOnce(mockResearchClient); + mockGenerateText + .mockRejectedValueOnce(new Error('Main fail 1')) + .mockRejectedValueOnce(new Error('Main fail 2')) + .mockRejectedValueOnce(new Error('Main fail 3')) + .mockRejectedValueOnce(new Error('Fallback fail 1')) + .mockRejectedValueOnce(new Error('Fallback fail 2')) + .mockRejectedValueOnce(new Error('Fallback fail 3')) + .mockRejectedValueOnce(new Error('Research fail 1')) + .mockRejectedValueOnce(new Error('Research fail 2')) + .mockRejectedValueOnce(new Error('Research fail 3')); // Last error + + const serviceParams = { role: 'main', prompt: 'All fail test' }; + + await expect(generateTextService(serviceParams)).rejects.toThrow( + 'Research fail 3' // Should throw the error from the LAST failed attempt + ); + + // Check calls + expect(mockGetClient).toHaveBeenCalledTimes(3); + expect(mockGenerateText).toHaveBeenCalledTimes(9); // 3 for each role + expect(mockLog).toHaveBeenCalledWith( + 'error', + 'All roles in the sequence [main,fallback,research] failed.' + ); + }); + + test('should handle error getting fallback client', async () => { + const mockMainClient = { type: 'mock-client', provider: 'main' }; + + // Setup calls: main fails, getting fallback client fails, research succeeds (to test sequence) + const mockResearchClient = { type: 'mock-client', provider: 'research' }; + mockGetClient + .mockResolvedValueOnce(mockMainClient) + .mockRejectedValueOnce(new Error('Cannot get fallback client')) + .mockResolvedValueOnce(mockResearchClient); + + mockGenerateText + .mockRejectedValueOnce(new Error('Main fail 1')) + .mockRejectedValueOnce(new Error('Main fail 2')) + .mockRejectedValueOnce(new Error('Main fail 3')) // Main fails 3 times + .mockResolvedValue({ text: 'Research success' }); // Research succeeds on its 1st attempt + + const serviceParams = { role: 'main', prompt: 'Fallback client error' }; + + // Should eventually succeed with research after main+fallback fail + const result = await generateTextService(serviceParams); + expect(result).toEqual({ text: 'Research success' }); + + expect(mockGetClient).toHaveBeenCalledTimes(3); // Tries main, fallback (fails), research + expect(mockGenerateText).toHaveBeenCalledTimes(4); // 3 main attempts, 1 research attempt + expect(mockLog).toHaveBeenCalledWith( + 'error', + 'Service call failed for role fallback: Cannot get fallback client' + ); + expect(mockLog).toHaveBeenCalledWith( + 'warn', + 'Could not get client for role fallback, trying next role in sequence...' 
+ ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'Attempting service call with role: research' + ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + expect.stringContaining( + 'generateTextService succeeded using role: research' + ) + ); + }); + + test('should try research after fallback fails if initial role is fallback', async () => { + const mockFallbackClient = { type: 'mock-client', provider: 'fallback' }; + const mockResearchClient = { type: 'mock-client', provider: 'research' }; + + mockGetClient + .mockResolvedValueOnce(mockFallbackClient) + .mockResolvedValueOnce(mockResearchClient); + mockGenerateText + .mockRejectedValueOnce(new Error('Fallback fail 1')) // Fallback 1 + .mockRejectedValueOnce(new Error('Fallback fail 2')) // Fallback 2 + .mockRejectedValueOnce(new Error('Fallback fail 3')) // Fallback 3 + .mockResolvedValue({ text: 'Research success' }); // Research 1 + + const serviceParams = { role: 'fallback', prompt: 'Start with fallback' }; + const result = await generateTextService(serviceParams); + + expect(mockGetClient).toHaveBeenCalledTimes(2); // Fallback, Research + expect(mockGetClient).toHaveBeenNthCalledWith( + 1, + 'fallback', + undefined, + undefined + ); + expect(mockGetClient).toHaveBeenNthCalledWith( + 2, + 'research', + undefined, + undefined + ); + expect(mockGenerateText).toHaveBeenCalledTimes(4); // 3 fallback, 1 research + expect(result).toEqual({ text: 'Research success' }); + + // Check logs for sequence + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'Attempting service call with role: fallback' + ); + expect(mockLog).toHaveBeenCalledWith( + 'error', + 'Service call failed for role fallback: Fallback fail 3' + ); + expect(mockLog).toHaveBeenCalledWith( + 'warn', + expect.stringContaining( + 'Retries exhausted or non-retryable error for role fallback' + ) + ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'Attempting service call with role: research' + ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + expect.stringContaining( + 'generateTextService succeeded using role: research' + ) + ); + }); + + test('should try fallback after research fails if initial role is research', async () => { + const mockResearchClient = { type: 'mock-client', provider: 'research' }; + const mockFallbackClient = { type: 'mock-client', provider: 'fallback' }; + + mockGetClient + .mockResolvedValueOnce(mockResearchClient) + .mockResolvedValueOnce(mockFallbackClient); + mockGenerateText + .mockRejectedValueOnce(new Error('Research fail 1')) // Research 1 + .mockRejectedValueOnce(new Error('Research fail 2')) // Research 2 + .mockRejectedValueOnce(new Error('Research fail 3')) // Research 3 + .mockResolvedValue({ text: 'Fallback success' }); // Fallback 1 + + const serviceParams = { role: 'research', prompt: 'Start with research' }; + const result = await generateTextService(serviceParams); + + expect(mockGetClient).toHaveBeenCalledTimes(2); // Research, Fallback + expect(mockGetClient).toHaveBeenNthCalledWith( + 1, + 'research', + undefined, + undefined + ); + expect(mockGetClient).toHaveBeenNthCalledWith( + 2, + 'fallback', + undefined, + undefined + ); + expect(mockGenerateText).toHaveBeenCalledTimes(4); // 3 research, 1 fallback + expect(result).toEqual({ text: 'Fallback success' }); + + // Check logs for sequence + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'Attempting service call with role: research' + ); + expect(mockLog).toHaveBeenCalledWith( + 'error', + 'Service call failed for role research: Research fail 3' + ); + 
expect(mockLog).toHaveBeenCalledWith( + 'warn', + expect.stringContaining( + 'Retries exhausted or non-retryable error for role research' + ) + ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + 'Attempting service call with role: fallback' + ); + expect(mockLog).toHaveBeenCalledWith( + 'info', + expect.stringContaining( + 'generateTextService succeeded using role: fallback' + ) + ); + }); + + test('should use default sequence and log warning for unknown initial role', async () => { + const mockMainClient = { type: 'mock-client', provider: 'main' }; + const mockFallbackClient = { type: 'mock-client', provider: 'fallback' }; + + mockGetClient + .mockResolvedValueOnce(mockMainClient) + .mockResolvedValueOnce(mockFallbackClient); + mockGenerateText + .mockRejectedValueOnce(new Error('Main fail 1')) // Main 1 + .mockRejectedValueOnce(new Error('Main fail 2')) // Main 2 + .mockRejectedValueOnce(new Error('Main fail 3')) // Main 3 + .mockResolvedValue({ text: 'Fallback success' }); // Fallback 1 + + const serviceParams = { + role: 'invalid-role', + prompt: 'Unknown role test' + }; + const result = await generateTextService(serviceParams); + + // Check warning log for unknown role + expect(mockLog).toHaveBeenCalledWith( + 'warn', + 'Unknown initial role: invalid-role. Defaulting to main -> fallback -> research sequence.' + ); + + // Check it followed the default main -> fallback sequence + expect(mockGetClient).toHaveBeenCalledTimes(2); // Main, Fallback + expect(mockGetClient).toHaveBeenNthCalledWith( + 1, + 'main', + undefined, + undefined + ); + expect(mockGetClient).toHaveBeenNthCalledWith( + 2, + 'fallback', + undefined, + undefined + ); + expect(mockGenerateText).toHaveBeenCalledTimes(4); // 3 main, 1 fallback + expect(result).toEqual({ text: 'Fallback success' }); + }); + }); +});
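
Editor's note: the assertions in the test file above pin down the control flow expected from `generateTextService` (role sequence, per-role retries with backoff, fallthrough to the next role, final throw). The following is a minimal sketch inferred from those assertions only; it is not the implementation added by this commit in `scripts/modules/ai-services-unified.js`, it deliberately simplifies some edge cases the tests cover (e.g. the non-retryable short-circuit), and helpers such as `isRetryableError`, `getRoleSequence`, `sleep`, and `MAX_ATTEMPTS` are assumptions introduced for illustration. The `getClient`, `generateText`, and `log` imports correspond to the modules mocked in the tests.

```javascript
// Sketch of the behavior the unit tests above describe (assumed, not the shipped code).
import { generateText } from 'ai';
import { getClient } from './ai-client-factory.js';
import { log } from './utils.js';

const MAX_ATTEMPTS = 3; // one initial call plus two retries, as the tests assume
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

// Hypothetical classifier: the tests treat rate limits and temporary outages as retryable.
const isRetryableError = (err) =>
	/rate limit|temporarily unavailable/i.test(err.message);

// Role fallback order implied by the tests; unknown roles fall back to the default sequence.
function getRoleSequence(initialRole) {
	if (initialRole === 'main') return ['main', 'fallback', 'research'];
	if (initialRole === 'fallback') return ['fallback', 'research'];
	if (initialRole === 'research') return ['research', 'fallback'];
	log(
		'warn',
		`Unknown initial role: ${initialRole}. Defaulting to main -> fallback -> research sequence.`
	);
	return ['main', 'fallback', 'research'];
}

export async function generateTextService({
	role,
	session,
	overrideOptions,
	...callParams // prompt, maxTokens, temperature, etc. are passed through to generateText
}) {
	log('info', 'generateTextService called', { role });
	const sequence = getRoleSequence(role);
	let lastError;

	for (const currentRole of sequence) {
		log('info', `Attempting service call with role: ${currentRole}`);
		try {
			const client = await getClient(currentRole, session, overrideOptions);
			log('info', 'Retrieved AI client', {
				provider: client.provider,
				model: client.model
			});

			for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
				log(`Attempt ${attempt}/${MAX_ATTEMPTS} calling generateText for role ${currentRole}`);
				try {
					const result = await generateText({ model: client, ...callParams });
					log('info', `generateText succeeded for role ${currentRole} on attempt ${attempt}`);
					log('info', `generateTextService succeeded using role: ${currentRole}`);
					return result;
				} catch (err) {
					lastError = err;
					log('warn', `Attempt ${attempt} failed for role ${currentRole}: ${err.message}`);
					if (isRetryableError(err) && attempt < MAX_ATTEMPTS) {
						const delaySeconds = 2 ** (attempt - 1); // 1s after attempt 1, 2s after attempt 2
						log('info', `Retryable error detected. Retrying in ${delaySeconds}s...`);
						await sleep(delaySeconds * 1000);
					} else {
						log('error', `Non-retryable error or max retries reached for role ${currentRole} (generateText).`);
						break;
					}
				}
			}
			// All attempts for this role failed; record it and move to the next role.
			log('error', `Service call failed for role ${currentRole}: ${lastError.message}`);
			log('warn', `Retries exhausted or non-retryable error for role ${currentRole}, trying next role in sequence...`);
		} catch (clientError) {
			// Could not obtain a client for this role; skip to the next role.
			lastError = clientError;
			log('error', `Service call failed for role ${currentRole}: ${clientError.message}`);
			log('warn', `Could not get client for role ${currentRole}, trying next role in sequence...`);
		}
	}

	log('error', `All roles in the sequence [${sequence}] failed.`);
	throw lastError;
}
```

Read this way, the test suite doubles as a specification: each log assertion corresponds to one branch of the sequence/retry loop, which is why the tests count `getClient` and `generateText` calls so precisely.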