diff --git a/.changeset/public-cooks-fetch.md b/.changeset/public-cooks-fetch.md new file mode 100644 index 00000000..6ecd9bde --- /dev/null +++ b/.changeset/public-cooks-fetch.md @@ -0,0 +1,7 @@ +--- +'task-master-ai': minor +--- + +Feat: Integrate OpenAI as a new AI provider. +Feat: Enhance `models` command/tool to display API key status. +Feat: Implement a model-specific `maxTokens` override based on `supported-models.json`, capping misconfigured max token values at the model's supported limit. diff --git a/.cursor/rules/ai_providers.mdc b/.cursor/rules/ai_providers.mdc index dcc9ef12..35800174 100644 --- a/.cursor/rules/ai_providers.mdc +++ b/.cursor/rules/ai_providers.mdc @@ -48,7 +48,7 @@ This rule guides AI assistants on how to view, configure, and interact with the - **`mistral`**: Requires `MISTRAL_API_KEY`. - **`azure`**: Requires `AZURE_OPENAI_API_KEY` and `AZURE_OPENAI_ENDPOINT`. - **`openrouter`**: Requires `OPENROUTER_API_KEY`. - - **`ollama`**: Typically requires `OLLAMA_API_KEY` *and* `OLLAMA_BASE_URL` (default: `http://localhost:11434/api`). *Check specific setup.* + - **`ollama`**: May require `OLLAMA_BASE_URL` (default: `http://localhost:11434/api`); an `OLLAMA_API_KEY` is not currently supported. *Check specific setup.* - **Troubleshooting:** - If AI commands fail (especially in MCP context): diff --git a/.env.example b/.env.example index 89480ddd..3f0a1cd6 100644 --- a/.env.example +++ b/.env.example @@ -7,4 +7,3 @@ MISTRAL_API_KEY=YOUR_MISTRAL_KEY_HERE OPENROUTER_API_KEY=YOUR_OPENROUTER_KEY_HERE XAI_API_KEY=YOUR_XAI_KEY_HERE AZURE_OPENAI_API_KEY=YOUR_AZURE_KEY_HERE -OLLAMA_API_KEY=YOUR_OLLAMA_KEY_HERE diff --git a/.taskmasterconfig b/.taskmasterconfig index 483fb034..ffda308e 100644 --- a/.taskmasterconfig +++ b/.taskmasterconfig @@ -1,9 +1,9 @@ { "models": { "main": { - "provider": "google", - "modelId": "gemini-2.5-pro-exp-03-25", - "maxTokens": 120000, + "provider": "openai", + "modelId": "o3-mini", + "maxTokens": 100000, "temperature": 0.2 }, "research": { @@ -14,7 +14,7 @@ }, "fallback": { "provider": "anthropic", - "modelId": "claude-3-5-sonnet-20241022", + "modelId": "claude-3-7-sonnet-20250219", "maxTokens": 120000, "temperature": 0.2 } diff --git a/README.md b/README.md index 31f9a3d2..27869786 100644 --- a/README.md +++ b/README.md @@ -42,8 +42,7 @@ npm i -g task-master-ai "MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE", "OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE", "XAI_API_KEY": "YOUR_XAI_KEY_HERE", - "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE", - "OLLAMA_API_KEY": "YOUR_OLLAMA_KEY_HERE" + "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE" } } } diff --git a/assets/env.example b/assets/env.example index ff5c877b..d44c6b09 100644 --- a/assets/env.example +++ b/assets/env.example @@ -5,5 +5,4 @@ OPENAI_API_KEY=your_openai_api_key_here # Optional, for OpenAI/OpenR GOOGLE_API_KEY=your_google_api_key_here # Optional, for Google Gemini models. MISTRAL_API_KEY=your_mistral_key_here # Optional, for Mistral AI models. XAI_API_KEY=YOUR_XAI_KEY_HERE # Optional, for xAI AI models. -AZURE_OPENAI_API_KEY=your_azure_key_here # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig). -OLLAMA_API_KEY=YOUR_OLLAMA_KEY_HERE # Optional, for local Ollama AI models (requires endpoint in .taskmasterconfig). \ No newline at end of file +AZURE_OPENAI_API_KEY=your_azure_key_here # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig). 
\ No newline at end of file diff --git a/docs/tutorial.md b/docs/tutorial.md index 3c94003a..8c20235a 100644 --- a/docs/tutorial.md +++ b/docs/tutorial.md @@ -32,8 +32,7 @@ npm i -g task-master-ai "MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE", "OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE", "XAI_API_KEY": "YOUR_XAI_KEY_HERE", - "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE", - "OLLAMA_API_KEY": "YOUR_OLLAMA_KEY_HERE" + "AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE" } } } diff --git a/package-lock.json b/package-lock.json index 988a40e9..4d3c982e 100644 --- a/package-lock.json +++ b/package-lock.json @@ -13,7 +13,7 @@ "@ai-sdk/azure": "^1.3.17", "@ai-sdk/google": "^1.2.13", "@ai-sdk/mistral": "^1.2.7", - "@ai-sdk/openai": "^1.3.16", + "@ai-sdk/openai": "^1.3.20", "@ai-sdk/perplexity": "^1.1.7", "@ai-sdk/xai": "^1.2.13", "@anthropic-ai/sdk": "^0.39.0", @@ -90,6 +90,22 @@ "zod": "^3.0.0" } }, + "node_modules/@ai-sdk/azure/node_modules/@ai-sdk/openai": { + "version": "1.3.16", + "resolved": "https://registry.npmjs.org/@ai-sdk/openai/-/openai-1.3.16.tgz", + "integrity": "sha512-pjtiBKt1GgaSKZryTbM3tqgoegJwgAUlp1+X5uN6T+VPnI4FLSymV65tyloWzDlyqZmi9HXnnSRPu76VoL5D5g==", + "license": "Apache-2.0", + "dependencies": { + "@ai-sdk/provider": "1.1.3", + "@ai-sdk/provider-utils": "2.2.7" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "zod": "^3.0.0" + } + }, "node_modules/@ai-sdk/google": { "version": "1.2.13", "resolved": "https://registry.npmjs.org/@ai-sdk/google/-/google-1.2.13.tgz", @@ -123,9 +139,9 @@ } }, "node_modules/@ai-sdk/openai": { - "version": "1.3.16", - "resolved": "https://registry.npmjs.org/@ai-sdk/openai/-/openai-1.3.16.tgz", - "integrity": "sha512-pjtiBKt1GgaSKZryTbM3tqgoegJwgAUlp1+X5uN6T+VPnI4FLSymV65tyloWzDlyqZmi9HXnnSRPu76VoL5D5g==", + "version": "1.3.20", + "resolved": "https://registry.npmjs.org/@ai-sdk/openai/-/openai-1.3.20.tgz", + "integrity": "sha512-/DflUy7ROG9k6n6YTXMBFPbujBKnbGY58f3CwvicLvDar9nDAloVnUWd3LUoOxpSVnX8vtQ7ngxF52SLWO6RwQ==", "license": "Apache-2.0", "dependencies": { "@ai-sdk/provider": "1.1.3", diff --git a/package.json b/package.json index c1be031c..ec905c5e 100644 --- a/package.json +++ b/package.json @@ -42,7 +42,7 @@ "@ai-sdk/azure": "^1.3.17", "@ai-sdk/google": "^1.2.13", "@ai-sdk/mistral": "^1.2.7", - "@ai-sdk/openai": "^1.3.16", + "@ai-sdk/openai": "^1.3.20", "@ai-sdk/perplexity": "^1.1.7", "@ai-sdk/xai": "^1.2.13", "@anthropic-ai/sdk": "^0.39.0", diff --git a/scripts/modules/ai-services-unified.js b/scripts/modules/ai-services-unified.js index d5398df8..e94d2b25 100644 --- a/scripts/modules/ai-services-unified.js +++ b/scripts/modules/ai-services-unified.js @@ -25,7 +25,8 @@ import { log, resolveEnvVariable } from './utils.js'; import * as anthropic from '../../src/ai-providers/anthropic.js'; import * as perplexity from '../../src/ai-providers/perplexity.js'; import * as google from '../../src/ai-providers/google.js'; // Import Google provider -// TODO: Import other provider modules when implemented (openai, ollama, etc.) +import * as openai from '../../src/ai-providers/openai.js'; // ADD: Import OpenAI provider +// TODO: Import other provider modules when implemented (ollama, etc.) 
// --- Provider Function Map --- // Maps provider names (lowercase) to their respective service functions @@ -47,8 +48,14 @@ const PROVIDER_FUNCTIONS = { generateText: google.generateGoogleText, streamText: google.streamGoogleText, generateObject: google.generateGoogleObject + }, + openai: { + // ADD: OpenAI entry + generateText: openai.generateOpenAIText, + streamText: openai.streamOpenAIText, + generateObject: openai.generateOpenAIObject } - // TODO: Add entries for openai, ollama, etc. when implemented + // TODO: Add entries for ollama, etc. when implemented }; // --- Configuration for Retries --- @@ -71,6 +78,54 @@ function isRetryableError(error) { ); } +/** + * Extracts a user-friendly error message from a potentially complex AI error object. + * Prioritizes nested messages and falls back to the top-level message. + * @param {Error | object | any} error - The error object. + * @returns {string} A concise error message. + */ +function _extractErrorMessage(error) { + try { + // Attempt 1: Look for Vercel SDK specific nested structure (common) + if (error?.data?.error?.message) { + return error.data.error.message; + } + + // Attempt 2: Look for nested error message directly in the error object + if (error?.error?.message) { + return error.error.message; + } + + // Attempt 3: Look for nested error message in response body if it's JSON string + if (typeof error?.responseBody === 'string') { + try { + const body = JSON.parse(error.responseBody); + if (body?.error?.message) { + return body.error.message; + } + } catch (parseError) { + // Ignore if responseBody is not valid JSON + } + } + + // Attempt 4: Use the top-level message if it exists + if (typeof error?.message === 'string' && error.message) { + return error.message; + } + + // Attempt 5: Handle simple string errors + if (typeof error === 'string') { + return error; + } + + // Fallback + return 'An unknown AI service error occurred.'; + } catch (e) { + // Safety net + return 'Failed to extract error message.'; + } +} + /** * Internal helper to resolve the API key for a given provider. * @param {string} providerName - The name of the provider (lowercase). 
@@ -87,8 +142,7 @@ function _resolveApiKey(providerName, session) { mistral: 'MISTRAL_API_KEY', azure: 'AZURE_OPENAI_API_KEY', openrouter: 'OPENROUTER_API_KEY', - xai: 'XAI_API_KEY', - ollama: 'OLLAMA_API_KEY' + xai: 'XAI_API_KEY' }; // Double check this -- I have had to use an api key for ollama in the past @@ -211,6 +265,8 @@ async function _unifiedServiceRunner(serviceType, params) { } let lastError = null; + let lastCleanErrorMessage = + 'AI service call failed for all configured roles.'; for (const currentRole of sequence) { let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn; @@ -344,23 +400,21 @@ async function _unifiedServiceRunner(serviceType, params) { return result; // Return original result for other cases } catch (error) { + const cleanMessage = _extractErrorMessage(error); // Extract clean message log( 'error', // Log as error since this role attempt failed - `Service call failed for role ${currentRole} (Provider: ${providerName || 'unknown'}): ${error.message}` + `Service call failed for role ${currentRole} (Provider: ${providerName || 'unknown'}): ${cleanMessage}` // Log the clean message ); - lastError = error; // Store the error to throw if all roles fail - // Log reason and continue (handled within the loop now) + lastError = error; // Store the original error for potential debugging + lastCleanErrorMessage = cleanMessage; // Store the clean message for final throw + // Continue to the next role in the sequence } } // If loop completes, all roles failed log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`); - throw ( - lastError || - new Error( - `AI service call (${serviceType}) failed for all configured roles in the sequence.` - ) - ); + // Throw a new error with the cleaner message from the last failure + throw new Error(lastCleanErrorMessage); } /** diff --git a/scripts/modules/commands.js b/scripts/modules/commands.js index 7750c5b4..765064c1 100644 --- a/scripts/modules/commands.js +++ b/scripts/modules/commands.js @@ -45,7 +45,9 @@ import { getDebugFlag, getConfig, writeConfig, - ConfigurationError // Import the custom error + ConfigurationError, // Import the custom error + getAllProviders, + isConfigFilePresent } from './config-manager.js'; import { @@ -57,17 +59,300 @@ import { getStatusWithColor, confirmTaskOverwrite, startLoadingIndicator, - stopLoadingIndicator + stopLoadingIndicator, + displayModelConfiguration, + displayAvailableModels, + displayApiKeyStatus } from './ui.js'; import { initializeProject } from '../init.js'; import { getModelConfiguration, getAvailableModelsList, - setModel + setModel, + getApiKeyStatusReport } from './task-manager/models.js'; // Import new core functions import { findProjectRoot } from './utils.js'; // Import findProjectRoot +/** + * Runs the interactive setup process for model configuration. + * @param {string|null} projectRoot - The resolved project root directory. + */ +async function runInteractiveSetup(projectRoot) { + if (!projectRoot) { + console.error( + chalk.red( + 'Error: Could not determine project root for interactive setup.' 
+ ) + ); + process.exit(1); + } + // Get available models - pass projectRoot + const availableModelsResult = await getAvailableModelsList({ projectRoot }); + if (!availableModelsResult.success) { + console.error( + chalk.red( + `Error fetching available models: ${availableModelsResult.error?.message || 'Unknown error'}` + ) + ); + process.exit(1); + } + const availableModelsForSetup = availableModelsResult.data.models; + + // Get current config - pass projectRoot + const currentConfigResult = await getModelConfiguration({ projectRoot }); + // Allow setup even if current config fails (might be first time run) + const currentModels = currentConfigResult.success + ? currentConfigResult.data?.activeModels + : { main: {}, research: {}, fallback: {} }; + if ( + !currentConfigResult.success && + currentConfigResult.error?.code !== 'CONFIG_MISSING' + ) { + // Log error if it's not just a missing file + console.error( + chalk.red( + `Warning: Could not fetch current configuration: ${currentConfigResult.error?.message || 'Unknown error'}` + ) + ); + } + + console.log(chalk.cyan.bold('\nInteractive Model Setup:')); + + // Find all available models for setup options + const allModelsForSetup = availableModelsForSetup + .filter((model) => !model.modelId.startsWith('[')) // Filter out placeholders like [ollama-any] + .map((model) => ({ + name: `${model.provider} / ${model.modelId}`, + value: { provider: model.provider, id: model.modelId } + })); + + if (allModelsForSetup.length === 0) { + console.error( + chalk.red('Error: No selectable models found in configuration.') + ); + process.exit(1); + } + + // Helper to get choices and default index for a role + const getPromptData = (role, allowNone = false) => { + const roleChoices = allModelsForSetup.filter((modelChoice) => + availableModelsForSetup + .find((m) => m.modelId === modelChoice.value.id) + ?.allowedRoles?.includes(role) + ); + + let choices = [...roleChoices]; + let defaultIndex = -1; + const currentModelId = currentModels[role]?.modelId; + + if (allowNone) { + choices = [ + { name: 'None (disable)', value: null }, + new inquirer.Separator(), + ...roleChoices + ]; + if (currentModelId) { + const foundIndex = roleChoices.findIndex( + (m) => m.value.id === currentModelId + ); + defaultIndex = foundIndex !== -1 ? foundIndex + 2 : 0; // +2 for None and Separator + } else { + defaultIndex = 0; // Default to 'None' + } + } else { + if (currentModelId) { + defaultIndex = roleChoices.findIndex( + (m) => m.value.id === currentModelId + ); + } + // Ensure defaultIndex is valid, otherwise default to 0 + if (defaultIndex < 0 || defaultIndex >= roleChoices.length) { + defaultIndex = 0; + } + } + + // Add Cancel option + const cancelOption = { name: 'Cancel setup', value: '__CANCEL__' }; + choices = [cancelOption, new inquirer.Separator(), ...choices]; + // Adjust default index accounting for Cancel and Separator + defaultIndex = defaultIndex !== -1 ? 
defaultIndex + 2 : 0; + + return { choices, default: defaultIndex }; + }; + + // --- Generate choices using the helper --- + const mainPromptData = getPromptData('main'); + const researchPromptData = getPromptData('research'); + const fallbackPromptData = getPromptData('fallback', true); // Allow 'None' for fallback + + const answers = await inquirer.prompt([ + { + type: 'list', + name: 'mainModel', + message: 'Select the main model for generation/updates:', + choices: mainPromptData.choices, + default: mainPromptData.default + }, + { + type: 'list', + name: 'researchModel', + message: 'Select the research model:', + choices: researchPromptData.choices, + default: researchPromptData.default, + when: (ans) => ans.mainModel !== '__CANCEL__' + }, + { + type: 'list', + name: 'fallbackModel', + message: 'Select the fallback model (optional):', + choices: fallbackPromptData.choices, + default: fallbackPromptData.default, + when: (ans) => + ans.mainModel !== '__CANCEL__' && ans.researchModel !== '__CANCEL__' + } + ]); + + // Check if user canceled at any point + if ( + answers.mainModel === '__CANCEL__' || + answers.researchModel === '__CANCEL__' || + answers.fallbackModel === '__CANCEL__' + ) { + console.log(chalk.yellow('\nSetup canceled. No changes made.')); + return; // Return instead of exit to allow display logic to run maybe? Or exit? Let's return for now. + } + + // Apply changes using setModel + let setupSuccess = true; + let setupConfigModified = false; + const coreOptionsSetup = { projectRoot }; // Pass root for setup actions + + // Set Main Model + if ( + answers.mainModel?.id && + answers.mainModel.id !== currentModels.main?.modelId + ) { + const result = await setModel( + 'main', + answers.mainModel.id, + coreOptionsSetup + ); + if (result.success) { + console.log( + chalk.blue( + `Selected main model: ${result.data.provider} / ${result.data.modelId}` + ) + ); + setupConfigModified = true; + } else { + console.error( + chalk.red( + `Error setting main model: ${result.error?.message || 'Unknown'}` + ) + ); + setupSuccess = false; + } + } + + // Set Research Model + if ( + answers.researchModel?.id && + answers.researchModel.id !== currentModels.research?.modelId + ) { + const result = await setModel( + 'research', + answers.researchModel.id, + coreOptionsSetup + ); + if (result.success) { + console.log( + chalk.blue( + `Selected research model: ${result.data.provider} / ${result.data.modelId}` + ) + ); + setupConfigModified = true; + } else { + console.error( + chalk.red( + `Error setting research model: ${result.error?.message || 'Unknown'}` + ) + ); + setupSuccess = false; + } + } + + // Set Fallback Model - Handle 'None' selection + const currentFallbackId = currentModels.fallback?.modelId; + const selectedFallbackValue = answers.fallbackModel; // Could be null or model object + const selectedFallbackId = selectedFallbackValue?.id; // Undefined if null + + if (selectedFallbackId !== currentFallbackId) { + // Compare IDs + if (selectedFallbackId) { + // User selected a specific fallback model + const result = await setModel( + 'fallback', + selectedFallbackId, + coreOptionsSetup + ); + if (result.success) { + console.log( + chalk.blue( + `Selected fallback model: ${result.data.provider} / ${result.data.modelId}` + ) + ); + setupConfigModified = true; + } else { + console.error( + chalk.red( + `Error setting fallback model: ${result.error?.message || 'Unknown'}` + ) + ); + setupSuccess = false; + } + } else if (currentFallbackId) { + // User selected 'None' but a fallback was 
previously set + // Need to explicitly clear it in the config file + const currentCfg = getConfig(projectRoot); // Pass root + if (currentCfg?.models?.fallback) { + // Check if fallback exists before clearing + currentCfg.models.fallback = { + ...currentCfg.models.fallback, // Keep params like tokens/temp + provider: undefined, + modelId: undefined + }; + if (writeConfig(currentCfg, projectRoot)) { + // Pass root + console.log(chalk.blue('Fallback model disabled.')); + setupConfigModified = true; + } else { + console.error( + chalk.red('Failed to disable fallback model in config file.') + ); + setupSuccess = false; + } + } else { + console.log(chalk.blue('Fallback model was already disabled.')); + } + } + // No action needed if fallback was already null/undefined and user selected None + } + + if (setupSuccess && setupConfigModified) { + console.log(chalk.green.bold('\nModel setup complete!')); + } else if (setupSuccess && !setupConfigModified) { + console.log(chalk.yellow('\nNo changes made to model configuration.')); + } else if (!setupSuccess) { + console.error( + chalk.red( + '\nErrors occurred during model selection. Please review and try again.' + ) + ); + } + // Let the main command flow continue to display results +} + /** * Configure and register CLI commands * @param {Object} program - Commander program instance @@ -1596,609 +1881,126 @@ function registerCommands(programInstance) { ) .option('--setup', 'Run interactive setup to configure models') .action(async (options) => { - try { - // ---> Explicitly find project root for CLI execution <--- - const projectRoot = findProjectRoot(); - if (!projectRoot && !options.setup) { - // Allow setup even if root isn't found immediately + const projectRoot = findProjectRoot(); // Find project root for context + + // --- Handle Interactive Setup --- + if (options.setup) { + // Assume runInteractiveSetup is defined elsewhere in this file + await runInteractiveSetup(projectRoot); + // No return here, flow continues to display results below + } + // --- Handle Direct Set Operations (only if not running setup) --- + else { + let modelUpdated = false; + if (options.setMain) { + const result = await setModel('main', options.setMain, { + projectRoot + }); + if (result.success) { + console.log(chalk.green(`✅ ${result.data.message}`)); + modelUpdated = true; + } else { + console.error(chalk.red(`❌ Error: ${result.error.message}`)); + // Optionally exit or provide more specific feedback + } + } + if (options.setResearch) { + const result = await setModel('research', options.setResearch, { + projectRoot + }); + if (result.success) { + console.log(chalk.green(`✅ ${result.data.message}`)); + modelUpdated = true; + } else { + console.error(chalk.red(`❌ Error: ${result.error.message}`)); + } + } + if (options.setFallback) { + const result = await setModel('fallback', options.setFallback, { + projectRoot + }); + if (result.success) { + console.log(chalk.green(`✅ ${result.data.message}`)); + modelUpdated = true; + } else { + console.error(chalk.red(`❌ Error: ${result.error.message}`)); + } + } + // If only set flags were used, we still proceed to display the results + } + // --- Always Display Status After Setup or Set --- + + const configResult = await getModelConfiguration({ projectRoot }); + // Fetch available models *before* displaying config to use for formatting + const availableResult = await getAvailableModelsList({ projectRoot }); + const apiKeyStatusResult = await getApiKeyStatusReport({ projectRoot }); // Fetch API key status + + // 1. 
Display Active Models + if (!configResult.success) { + // If config is missing AFTER setup attempt, it might indicate an issue saving. + if (options.setup && configResult.error?.code === 'CONFIG_MISSING') { console.error( chalk.red( - "Error: Could not determine the project root. Ensure you're running this command within a Task Master project directory." + `❌ Error: Configuration file still missing after setup attempt. Check file permissions.` ) ); - process.exit(1); - } - // ---> End find project root <--- - - // --- Set Operations --- - if (options.setMain || options.setResearch || options.setFallback) { - let resultSet = null; - const coreOptions = { projectRoot }; // Pass root to setModel - if (options.setMain) { - resultSet = await setModel('main', options.setMain, coreOptions); - } else if (options.setResearch) { - resultSet = await setModel( - 'research', - options.setResearch, - coreOptions - ); - } else if (options.setFallback) { - resultSet = await setModel( - 'fallback', - options.setFallback, - coreOptions - ); - } - - if (resultSet?.success) { - console.log(chalk.green(resultSet.data.message)); - } else { - console.error( - chalk.red( - `Error setting model: ${resultSet?.error?.message || 'Unknown error'}` - ) - ); - if (resultSet?.error?.code === 'MODEL_NOT_FOUND') { - console.log( - chalk.yellow( - '\\nRun `task-master models` to see available models.' - ) - ); - } - process.exit(1); - } - return; // Exit after successful set operation - } - - // --- Interactive Setup --- - if (options.setup) { - // Get available models for interactive setup - pass projectRoot - const availableModelsResult = await getAvailableModelsList({ - projectRoot - }); - if (!availableModelsResult.success) { - console.error( - chalk.red( - `Error fetching available models: ${availableModelsResult.error?.message || 'Unknown error'}` - ) - ); - process.exit(1); - } - const availableModelsForSetup = availableModelsResult.data.models; - - // Get current config - pass projectRoot - const currentConfigResult = await getModelConfiguration({ - projectRoot - }); - if (!currentConfigResult.success) { - console.error( - chalk.red( - `Error fetching current configuration: ${currentConfigResult.error?.message || 'Unknown error'}` - ) - ); - // Allow setup even if current config fails (might be first time run) - } - const currentModels = currentConfigResult.data?.activeModels || { - main: {}, - research: {}, - fallback: {} - }; - - console.log(chalk.cyan.bold('\\nInteractive Model Setup:')); - - // Find all available models for setup options - const allModelsForSetup = availableModelsForSetup.map((model) => ({ - name: `${model.provider} / ${model.modelId}`, - value: { provider: model.provider, id: model.modelId } - })); - - if (allModelsForSetup.length === 0) { - console.error( - chalk.red('Error: No selectable models found in configuration.') - ); - process.exit(1); - } - - // Helper to get choices and default index for a role - const getPromptData = (role, allowNone = false) => { - const roleChoices = allModelsForSetup.filter((modelChoice) => - availableModelsForSetup - .find((m) => m.modelId === modelChoice.value.id) - ?.allowedRoles?.includes(role) - ); - - let choices = [...roleChoices]; - let defaultIndex = -1; - const currentModelId = currentModels[role]?.modelId; - - if (allowNone) { - choices = [ - { name: 'None (disable)', value: null }, - new inquirer.Separator(), - ...roleChoices - ]; - if (currentModelId) { - const foundIndex = roleChoices.findIndex( - (m) => m.value.id === currentModelId - ); - 
defaultIndex = foundIndex !== -1 ? foundIndex + 2 : 0; // +2 for None and Separator - } else { - defaultIndex = 0; // Default to 'None' - } - } else { - if (currentModelId) { - defaultIndex = roleChoices.findIndex( - (m) => m.value.id === currentModelId - ); - } - } - - // Add Cancel option - const cancelOption = { - name: 'Cancel setup (q)', - value: '__CANCEL__' - }; - choices = [cancelOption, new inquirer.Separator(), ...choices]; - defaultIndex = defaultIndex !== -1 ? defaultIndex + 2 : 0; // +2 for Cancel and Separator - - return { choices, default: defaultIndex }; - }; - - // Add key press handler for 'q' to cancel - // Ensure stdin is available and resume it if needed - if (process.stdin.isTTY) { - process.stdin.setRawMode(true); - process.stdin.resume(); - process.stdin.setEncoding('utf8'); - process.stdin.on('data', (key) => { - if (key === 'q' || key === '\\u0003') { - // 'q' or Ctrl+C - console.log( - chalk.yellow('\\nSetup canceled. No changes made.') - ); - process.exit(0); - } - }); - console.log( - chalk.gray('Press "q" at any time to cancel the setup.') - ); - } - - // --- Generate choices using the helper --- - const mainPromptData = getPromptData('main'); - const researchPromptData = getPromptData('research'); - const fallbackPromptData = getPromptData('fallback', true); // Allow 'None' for fallback - - const answers = await inquirer.prompt([ - { - type: 'list', - name: 'mainModel', - message: 'Select the main model for generation/updates:', - choices: mainPromptData.choices, - default: mainPromptData.default - }, - { - type: 'list', - name: 'researchModel', - message: 'Select the research model:', - choices: researchPromptData.choices, - default: researchPromptData.default, - when: (ans) => ans.mainModel !== '__CANCEL__' - }, - { - type: 'list', - name: 'fallbackModel', - message: 'Select the fallback model (optional):', - choices: fallbackPromptData.choices, - default: fallbackPromptData.default, - when: (ans) => - ans.mainModel !== '__CANCEL__' && - ans.researchModel !== '__CANCEL__' - } - ]); - - // Clean up the keypress handler - if (process.stdin.isTTY) { - process.stdin.pause(); - process.stdin.removeAllListeners('data'); - process.stdin.setRawMode(false); - } - - // Check if user canceled at any point - if ( - answers.mainModel === '__CANCEL__' || - answers.researchModel === '__CANCEL__' || - answers.fallbackModel === '__CANCEL__' - ) { - console.log(chalk.yellow('\\nSetup canceled. 
No changes made.')); - return; - } - - // Apply changes using setModel - let setupSuccess = true; - let setupConfigModified = false; - const coreOptionsSetup = { projectRoot }; // Pass root for setup actions - - if ( - answers.mainModel && - answers.mainModel?.id && - answers.mainModel.id !== currentModels.main?.modelId - ) { - const result = await setModel( - 'main', - answers.mainModel.id, - coreOptionsSetup - ); - if (result.success) { - console.log( - chalk.blue( - `Selected main model: ${result.data.provider} / ${result.data.modelId}` - ) - ); - setupConfigModified = true; - } else { - console.error( - chalk.red( - `Error setting main model: ${result.error?.message || 'Unknown'}` - ) - ); - setupSuccess = false; - } - } - - if ( - answers.researchModel && - answers.researchModel?.id && - answers.researchModel.id !== currentModels.research?.modelId - ) { - const result = await setModel( - 'research', - answers.researchModel.id, - coreOptionsSetup - ); - if (result.success) { - console.log( - chalk.blue( - `Selected research model: ${result.data.provider} / ${result.data.modelId}` - ) - ); - setupConfigModified = true; - } else { - console.error( - chalk.red( - `Error setting research model: ${result.error?.message || 'Unknown'}` - ) - ); - setupSuccess = false; - } - } - - // Set Fallback Model - Handle 'None' selection - const currentFallbackId = currentModels.fallback?.modelId; - const selectedFallbackValue = answers.fallbackModel; // Could be null or model object - const selectedFallbackId = selectedFallbackValue?.id; // Undefined if null - - if (selectedFallbackId !== currentFallbackId) { - // Compare IDs - if (selectedFallbackId) { - // User selected a specific fallback model - const result = await setModel( - 'fallback', - selectedFallbackId, - coreOptionsSetup - ); - if (result.success) { - console.log( - chalk.blue( - `Selected fallback model: ${result.data.provider} / ${result.data.modelId}` - ) - ); - setupConfigModified = true; - } else { - console.error( - chalk.red( - `Error setting fallback model: ${result.error?.message || 'Unknown'}` - ) - ); - setupSuccess = false; - } - } else if (currentFallbackId) { - // User selected 'None' but a fallback was previously set - // Need to explicitly clear it in the config file - const currentCfg = getConfig(projectRoot); // Pass root - if (currentCfg?.models?.fallback) { - // Check if fallback exists before clearing - currentCfg.models.fallback = { - ...currentCfg.models.fallback, - provider: undefined, - modelId: undefined - }; - if (writeConfig(currentCfg, projectRoot)) { - // Pass root - console.log(chalk.blue('Fallback model disabled.')); - setupConfigModified = true; - } else { - console.error( - chalk.red( - 'Failed to disable fallback model in config file.' - ) - ); - setupSuccess = false; - } - } else { - console.log(chalk.blue('Fallback model was already disabled.')); - } - } - // No action needed if fallback was already null/undefined and user selected None - } - - if (setupSuccess && setupConfigModified) { - console.log(chalk.green.bold('\\nModel setup complete!')); - } else if (setupSuccess && !setupConfigModified) { - console.log( - chalk.yellow('\\nNo changes made to model configuration.') - ); - } else if (!setupSuccess) { - console.error( - chalk.red( - '\\nErrors occurred during model selection. Please review and try again.' 
- ) - ); - } - return; // Exit after setup attempt - } - - // --- Default: Display Current Configuration --- - // Fetch configuration using the core function - PASS projectRoot - const result = await getModelConfiguration({ projectRoot }); - - if (!result.success) { - // Handle specific CONFIG_MISSING error gracefully - if (result.error?.code === 'CONFIG_MISSING') { - console.error( - boxen( - chalk.red.bold('Configuration File Missing!') + - '\n\n' + - chalk.white( - 'The .taskmasterconfig file was not found in your project root.\n\n' + - 'Run the interactive setup to create and configure it:' - ) + - '\n' + - chalk.green(' task-master models --setup'), - { - padding: 1, - margin: { top: 1 }, - borderColor: 'red', - borderStyle: 'round' - } - ) - ); - process.exit(0); // Exit gracefully, user needs to run setup - } else { - console.error( - chalk.red( - `Error fetching model configuration: ${result.error?.message || 'Unknown error'}` - ) - ); - process.exit(1); - } - } - - const configData = result.data; - const active = configData.activeModels; - const warnings = configData.warnings || []; // Warnings now come from core function - - // --- Display Warning Banner (if any) --- - if (warnings.length > 0) { - console.log( - boxen( - chalk.red.bold('API Key Warnings:') + - '\n\n' + - warnings.join('\n'), - { - padding: 1, - margin: { top: 1, bottom: 1 }, - borderColor: 'red', - borderStyle: 'round' - } - ) - ); - } - - // --- Active Configuration Section --- - console.log(chalk.cyan.bold('\nActive Model Configuration:')); - const activeTable = new Table({ - head: [ - 'Role', - 'Provider', - 'Model ID', - 'SWE Score', - 'Cost ($/1M tkns)', - 'API Key Status' - ].map((h) => chalk.cyan.bold(h)), - colWidths: [10, 14, 30, 18, 20, 28], - style: { head: ['cyan', 'bold'] } - }); - - // --- Helper functions for formatting (can be moved to ui.js if complex) --- - const formatSweScoreWithTertileStars = (score, allModels) => { - if (score === null || score === undefined || score <= 0) return 'N/A'; - const formattedPercentage = `${(score * 100).toFixed(1)}%`; - - const validScores = allModels - .map((m) => m.sweScore) - .filter((s) => s !== null && s !== undefined && s > 0); - const sortedScores = [...validScores].sort((a, b) => b - a); - const n = sortedScores.length; - let stars = chalk.gray('☆☆☆'); - - if (n > 0) { - const topThirdIndex = Math.max(0, Math.floor(n / 3) - 1); - const midThirdIndex = Math.max(0, Math.floor((2 * n) / 3) - 1); - if (score >= sortedScores[topThirdIndex]) - stars = chalk.yellow('★★★'); - else if (score >= sortedScores[midThirdIndex]) - stars = chalk.yellow('★★') + chalk.gray('☆'); - else stars = chalk.yellow('★') + chalk.gray('☆☆'); - } - return `${formattedPercentage} ${stars}`; - }; - - const formatCost = (costObj) => { - if (!costObj) return 'N/A'; - - // Check if both input and output costs are 0 and return "Free" - if (costObj.input === 0 && costObj.output === 0) { - return chalk.green('Free'); - } - - const formatSingleCost = (costValue) => { - if (costValue === null || costValue === undefined) return 'N/A'; - const isInteger = Number.isInteger(costValue); - return `$${costValue.toFixed(isInteger ? 0 : 2)}`; - }; - return `${formatSingleCost(costObj.input)} in, ${formatSingleCost( - costObj.output - )} out`; - }; - - const getCombinedStatus = (keyStatus) => { - const cliOk = keyStatus?.cli; - const mcpOk = keyStatus?.mcp; - const cliSymbol = cliOk ? chalk.green('✓') : chalk.red('✗'); - const mcpSymbol = mcpOk ? 
chalk.green('✓') : chalk.red('✗'); - - if (cliOk && mcpOk) return `${cliSymbol} CLI & ${mcpSymbol} MCP OK`; - if (cliOk && !mcpOk) - return `${cliSymbol} CLI OK / ${mcpSymbol} MCP Missing`; - if (!cliOk && mcpOk) - return `${cliSymbol} CLI Missing / ${mcpSymbol} MCP OK`; - return chalk.gray(`${cliSymbol} CLI & MCP Both Missing`); - }; - - // Get all available models data once for SWE Score calculation - const availableModelsResultForScore = await getAvailableModelsList(); - const allAvailModelsForScore = - availableModelsResultForScore.data?.models || []; - - // Populate Active Table - activeTable.push([ - chalk.white('Main'), - active.main.provider, - active.main.modelId, - formatSweScoreWithTertileStars( - active.main.sweScore, - allAvailModelsForScore - ), - formatCost(active.main.cost), - getCombinedStatus(active.main.keyStatus) - ]); - activeTable.push([ - chalk.white('Research'), - active.research.provider, - active.research.modelId, - formatSweScoreWithTertileStars( - active.research.sweScore, - allAvailModelsForScore - ), - formatCost(active.research.cost), - getCombinedStatus(active.research.keyStatus) - ]); - if (active.fallback) { - activeTable.push([ - chalk.white('Fallback'), - active.fallback.provider, - active.fallback.modelId, - formatSweScoreWithTertileStars( - active.fallback.sweScore, - allAvailModelsForScore - ), - formatCost(active.fallback.cost), - getCombinedStatus(active.fallback.keyStatus) - ]); - } - console.log(activeTable.toString()); - - // --- Available Models Section --- - const availableResult = await getAvailableModelsList(); - if (availableResult.success && availableResult.data.models.length > 0) { - console.log(chalk.cyan.bold('\nOther Available Models:')); - const availableTable = new Table({ - head: ['Provider', 'Model ID', 'SWE Score', 'Cost ($/1M tkns)'].map( - (h) => chalk.cyan.bold(h) - ), - colWidths: [15, 40, 18, 25], - style: { head: ['cyan', 'bold'] } - }); - availableResult.data.models.forEach((model) => { - availableTable.push([ - model.provider, - model.modelId, - formatSweScoreWithTertileStars( - model.sweScore, - allAvailModelsForScore - ), - formatCost(model.cost) - ]); - }); - console.log(availableTable.toString()); - } else if (availableResult.success) { - console.log( - chalk.gray('\n(All available models are currently configured)') - ); } else { - console.warn( - chalk.yellow( - `Could not fetch available models list: ${availableResult.error?.message}` + console.error( + chalk.red( + `❌ Error fetching configuration: ${configResult.error.message}` ) ); } + // Attempt to display other info even if config fails + } else { + // Pass available models list for SWE score formatting + displayModelConfiguration( + configResult.data, + availableResult.data?.models || [] + ); + } - // --- Suggested Actions Section --- - console.log( - boxen( - chalk.white.bold('Next Steps:') + - '\n' + - chalk.cyan( - `1. Set main model: ${chalk.yellow('task-master models --set-main ')}` - ) + - '\n' + - chalk.cyan( - `2. Set research model: ${chalk.yellow('task-master models --set-research ')}` - ) + - '\n' + - chalk.cyan( - `3. Set fallback model: ${chalk.yellow('task-master models --set-fallback ')}` - ) + - '\n' + - chalk.cyan( - `4. Run interactive setup: ${chalk.yellow('task-master models --setup')}` - ), - { - padding: 1, - borderColor: 'yellow', - borderStyle: 'round', - margin: { top: 1 } - } + // 2. 
Display API Key Status + if (apiKeyStatusResult.success) { + displayApiKeyStatus(apiKeyStatusResult.data.report); + } else { + console.error( + chalk.yellow( + `⚠️ Warning: Could not display API Key status: ${apiKeyStatusResult.error.message}` ) ); - } catch (error) { - // Catch errors specifically from the core model functions - console.error( - chalk.red(`Error processing models command: ${error.message}`) + } + + // 3. Display Other Available Models (Filtered) + if (availableResult.success) { + // Filter out models that are already actively configured and placeholders + const activeIds = configResult.success + ? [ + configResult.data.activeModels.main.modelId, + configResult.data.activeModels.research.modelId, + configResult.data.activeModels.fallback?.modelId + ].filter(Boolean) + : []; + const displayableAvailable = availableResult.data.models.filter( + (m) => !activeIds.includes(m.modelId) && !m.modelId.startsWith('[') // Exclude placeholders like [ollama-any] + ); + displayAvailableModels(displayableAvailable); // This function now includes the "Next Steps" box + } else { + console.error( + chalk.yellow( + `⚠️ Warning: Could not display available models: ${availableResult.error.message}` + ) + ); + } + + // 4. Conditional Hint if Config File is Missing + const configExists = isConfigFilePresent(projectRoot); // Re-check after potential setup/writes + if (!configExists) { + console.log( + chalk.yellow( + "\\nHint: Run 'task-master models --setup' to create or update your configuration." + ) ); - if (error instanceof ConfigurationError) { - // Provide specific guidance if it's a config error - console.error( - chalk.yellow( - 'This might be a configuration file issue. Try running `task-master models --setup`.' - ) - ); - } - if (getDebugFlag()) { - console.error(error.stack); - } - process.exit(1); } }); diff --git a/scripts/modules/config-manager.js b/scripts/modules/config-manager.js index fca7bd4d..e583419c 100644 --- a/scripts/modules/config-manager.js +++ b/scripts/modules/config-manager.js @@ -255,8 +255,6 @@ function getModelConfigForRole(role, explicitRoot = null) { const config = getConfig(explicitRoot); const roleConfig = config?.models?.[role]; if (!roleConfig) { - // This shouldn't happen if _loadAndValidateConfig ensures defaults - // But as a safety net, log and return defaults log( 'warn', `No model configuration found for role: ${role}. Returning default.` @@ -363,16 +361,64 @@ function getOllamaBaseUrl(explicitRoot = null) { } /** - * Gets model parameters (maxTokens, temperature) for a specific role. + * Gets model parameters (maxTokens, temperature) for a specific role, + * considering model-specific overrides from supported-models.json. * @param {string} role - The role ('main', 'research', 'fallback'). * @param {string|null} explicitRoot - Optional explicit path to the project root. 
* @returns {{maxTokens: number, temperature: number}} */ function getParametersForRole(role, explicitRoot = null) { const roleConfig = getModelConfigForRole(role, explicitRoot); + const roleMaxTokens = roleConfig.maxTokens; + const roleTemperature = roleConfig.temperature; + const modelId = roleConfig.modelId; + const providerName = roleConfig.provider; + + let effectiveMaxTokens = roleMaxTokens; // Start with the role's default + + try { + // Find the model definition in MODEL_MAP + const providerModels = MODEL_MAP[providerName]; + if (providerModels && Array.isArray(providerModels)) { + const modelDefinition = providerModels.find((m) => m.id === modelId); + + // Check if a model-specific max_tokens is defined and valid + if ( + modelDefinition && + typeof modelDefinition.max_tokens === 'number' && + modelDefinition.max_tokens > 0 + ) { + const modelSpecificMaxTokens = modelDefinition.max_tokens; + // Use the minimum of the role default and the model specific limit + effectiveMaxTokens = Math.min(roleMaxTokens, modelSpecificMaxTokens); + log( + 'debug', + `Applying model-specific max_tokens (${modelSpecificMaxTokens}) for ${modelId}. Effective limit: ${effectiveMaxTokens}` + ); + } else { + log( + 'debug', + `No valid model-specific max_tokens override found for ${modelId}. Using role default: ${roleMaxTokens}` + ); + } + } else { + log( + 'debug', + `No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}` + ); + } + } catch (lookupError) { + log( + 'warn', + `Error looking up model-specific max_tokens for ${modelId}: ${lookupError.message}. Using role default: ${roleMaxTokens}` + ); + // Fallback to role default on error + effectiveMaxTokens = roleMaxTokens; + } + return { - maxTokens: roleConfig.maxTokens, - temperature: roleConfig.temperature + maxTokens: effectiveMaxTokens, + temperature: roleTemperature }; } @@ -385,16 +431,19 @@ function getParametersForRole(role, explicitRoot = null) { */ function isApiKeySet(providerName, session = null) { // Define the expected environment variable name for each provider + if (providerName?.toLowerCase() === 'ollama') { + return true; // Indicate key status is effectively "OK" + } + const keyMap = { openai: 'OPENAI_API_KEY', anthropic: 'ANTHROPIC_API_KEY', google: 'GOOGLE_API_KEY', perplexity: 'PERPLEXITY_API_KEY', mistral: 'MISTRAL_API_KEY', - azure: 'AZURE_OPENAI_API_KEY', // Azure needs endpoint too, but key presence is a start + azure: 'AZURE_OPENAI_API_KEY', openrouter: 'OPENROUTER_API_KEY', - xai: 'XAI_API_KEY', - ollama: 'OLLAMA_API_KEY' + xai: 'XAI_API_KEY' // Add other providers as needed }; @@ -405,8 +454,15 @@ function isApiKeySet(providerName, session = null) { } const envVarName = keyMap[providerKey]; - // Use resolveEnvVariable to check both process.env and session.env - return !!resolveEnvVariable(envVarName, session); + const apiKeyValue = resolveEnvVariable(envVarName, session); + + // Check if the key exists, is not empty, and is not a placeholder + return ( + apiKeyValue && + apiKeyValue.trim() !== '' && + !/YOUR_.*_API_KEY_HERE/.test(apiKeyValue) && // General placeholder check + !apiKeyValue.includes('KEY_HERE') + ); // Another common placeholder pattern } /** @@ -482,7 +538,7 @@ function getMcpApiKeyStatus(providerName, projectRoot = null) { return false; // Unknown provider } - return !!apiKeyToCheck && apiKeyToCheck !== placeholderValue; + return !!apiKeyToCheck && !/KEY_HERE$/.test(apiKeyToCheck); } catch (error) { console.error( chalk.red(`Error reading or 
parsing .cursor/mcp.json: ${error.message}`) @@ -589,6 +645,14 @@ function isConfigFilePresent(explicitRoot = null) { return fs.existsSync(configPath); } +/** + * Gets a list of all provider names defined in the MODEL_MAP. + * @returns {string[]} An array of provider names. + */ +function getAllProviders() { + return Object.keys(MODEL_MAP || {}); +} + export { // Core config access getConfig, @@ -628,5 +692,8 @@ export { // API Key Checkers (still relevant) isApiKeySet, - getMcpApiKeyStatus + getMcpApiKeyStatus, + + // ADD: Function to get all provider names + getAllProviders }; diff --git a/scripts/modules/supported-models.json b/scripts/modules/supported-models.json index 5d4bca96..63278d26 100644 --- a/scripts/modules/supported-models.json +++ b/scripts/modules/supported-models.json @@ -4,25 +4,29 @@ "id": "claude-3-7-sonnet-20250219", "swe_score": 0.623, "cost_per_1m_tokens": { "input": 3.0, "output": 15.0 }, - "allowed_roles": ["main", "fallback"] + "allowed_roles": ["main", "fallback"], + "max_tokens": 120000 }, { "id": "claude-3-5-sonnet-20241022", "swe_score": 0.49, "cost_per_1m_tokens": { "input": 3.0, "output": 15.0 }, - "allowed_roles": ["main", "fallback"] + "allowed_roles": ["main", "fallback"], + "max_tokens": 64000 }, { "id": "claude-3-5-haiku-20241022", "swe_score": 0.406, "cost_per_1m_tokens": { "input": 0.8, "output": 4.0 }, - "allowed_roles": ["main", "fallback"] + "allowed_roles": ["main", "fallback"], + "max_tokens": 64000 }, { "id": "claude-3-opus-20240229", "swe_score": 0, "cost_per_1m_tokens": { "input": 15, "output": 75 }, - "allowed_roles": ["main", "fallback"] + "allowed_roles": ["main", "fallback"], + "max_tokens": 64000 } ], "openai": [ @@ -48,7 +52,8 @@ "id": "o3-mini", "swe_score": 0.493, "cost_per_1m_tokens": { "input": 1.1, "output": 4.4 }, - "allowed_roles": ["main", "fallback"] + "allowed_roles": ["main", "fallback"], + "max_tokens": 100000 }, { "id": "o4-mini", @@ -68,12 +73,6 @@ "cost_per_1m_tokens": { "input": 150.0, "output": 600.0 }, "allowed_roles": ["main", "fallback"] }, - { - "id": "gpt-4-1", - "swe_score": 0.55, - "cost_per_1m_tokens": { "input": 2.0, "output": 8.0 }, - "allowed_roles": ["main", "fallback"] - }, { "id": "gpt-4-5-preview", "swe_score": 0.38, @@ -148,31 +147,36 @@ "id": "sonar-pro", "swe_score": 0, "cost_per_1m_tokens": { "input": 3, "output": 15 }, - "allowed_roles": ["research"] + "allowed_roles": ["research"], + "max_tokens": 8700 }, { "id": "sonar", "swe_score": 0, "cost_per_1m_tokens": { "input": 1, "output": 1 }, - "allowed_roles": ["research"] + "allowed_roles": ["research"], + "max_tokens": 8700 }, { "id": "deep-research", "swe_score": 0.211, "cost_per_1m_tokens": { "input": 2, "output": 8 }, - "allowed_roles": ["research"] + "allowed_roles": ["research"], + "max_tokens": 8700 }, { "id": "sonar-reasoning-pro", "swe_score": 0.211, "cost_per_1m_tokens": { "input": 2, "output": 8 }, - "allowed_roles": ["main", "fallback"] + "allowed_roles": ["main", "fallback"], + "max_tokens": 8700 }, { "id": "sonar-reasoning", "swe_score": 0.211, "cost_per_1m_tokens": { "input": 1, "output": 5 }, - "allowed_roles": ["main", "fallback"] + "allowed_roles": ["main", "fallback"], + "max_tokens": 8700 } ], "ollama": [ diff --git a/scripts/modules/task-manager/models.js b/scripts/modules/task-manager/models.js index 612fbf38..2cfb060d 100644 --- a/scripts/modules/task-manager/models.js +++ b/scripts/modules/task-manager/models.js @@ -17,7 +17,8 @@ import { getMcpApiKeyStatus, getConfig, writeConfig, - isConfigFilePresent + isConfigFilePresent, + 
getAllProviders } from '../config-manager.js'; /** @@ -382,4 +383,61 @@ async function setModel(role, modelId, options = {}) { } } -export { getModelConfiguration, getAvailableModelsList, setModel }; +/** + * Get API key status for all known providers. + * @param {Object} [options] - Options for the operation + * @param {Object} [options.session] - Session object containing environment variables (for MCP) + * @param {Function} [options.mcpLog] - MCP logger object (for MCP) + * @param {string} [options.projectRoot] - Project root directory + * @returns {Object} RESTful response with API key status report + */ +async function getApiKeyStatusReport(options = {}) { + const { mcpLog, projectRoot, session } = options; + const report = (level, ...args) => { + if (mcpLog && typeof mcpLog[level] === 'function') { + mcpLog[level](...args); + } + }; + + try { + const providers = getAllProviders(); + const providersToCheck = providers.filter( + (p) => p.toLowerCase() !== 'ollama' + ); // Ollama is not a provider, it's a service, doesn't need an api key usually + const statusReport = providersToCheck.map((provider) => { + // Use provided projectRoot for MCP status check + const cliOk = isApiKeySet(provider, session); // Pass session for CLI check too + const mcpOk = getMcpApiKeyStatus(provider, projectRoot); + return { + provider, + cli: cliOk, + mcp: mcpOk + }; + }); + + report('info', 'Successfully generated API key status report.'); + return { + success: true, + data: { + report: statusReport, + message: 'API key status report generated.' + } + }; + } catch (error) { + report('error', `Error generating API key status report: ${error.message}`); + return { + success: false, + error: { + code: 'API_KEY_STATUS_ERROR', + message: error.message + } + }; + } +} + +export { + getModelConfiguration, + getAvailableModelsList, + setModel, + getApiKeyStatusReport +}; diff --git a/scripts/modules/ui.js b/scripts/modules/ui.js index 093170e0..9c07c48d 100644 --- a/scripts/modules/ui.js +++ b/scripts/modules/ui.js @@ -1814,6 +1814,210 @@ async function confirmTaskOverwrite(tasksPath) { return answer.toLowerCase() === 'y' || answer.toLowerCase() === 'yes'; } +/** + * Displays the API key status for different providers. + * @param {Array<{provider: string, cli: boolean, mcp: boolean}>} statusReport - The report generated by getApiKeyStatusReport. + */ +function displayApiKeyStatus(statusReport) { + if (!statusReport || statusReport.length === 0) { + console.log(chalk.yellow('No API key status information available.')); + return; + } + + const table = new Table({ + head: [ + chalk.cyan('Provider'), + chalk.cyan('CLI Key (.env)'), + chalk.cyan('MCP Key (mcp.json)') + ], + colWidths: [15, 20, 25], + chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' } + }); + + statusReport.forEach(({ provider, cli, mcp }) => { + const cliStatus = cli ? chalk.green('✅ Found') : chalk.red('❌ Missing'); + const mcpStatus = mcp ? chalk.green('✅ Found') : chalk.red('❌ Missing'); + // Capitalize provider name for display + const providerName = provider.charAt(0).toUpperCase() + provider.slice(1); + table.push([providerName, cliStatus, mcpStatus]); + }); + + console.log(chalk.bold('\n🔑 API Key Status:')); + console.log(table.toString()); + console.log( + chalk.gray( + ' Note: Some providers (e.g., Azure, Ollama) may require additional endpoint configuration in .taskmasterconfig.' 
+ ) + ); +} + +// --- Formatting Helpers (Potentially move some to utils.js if reusable) --- + +const formatSweScoreWithTertileStars = (score, allModels) => { + // ... (Implementation from previous version or refine) ... + if (score === null || score === undefined || score <= 0) return 'N/A'; + const formattedPercentage = `${(score * 100).toFixed(1)}%`; + + const validScores = allModels + .map((m) => m.sweScore) + .filter((s) => s !== null && s !== undefined && s > 0); + const sortedScores = [...validScores].sort((a, b) => b - a); + const n = sortedScores.length; + let stars = chalk.gray('☆☆☆'); + + if (n > 0) { + const topThirdIndex = Math.max(0, Math.floor(n / 3) - 1); + const midThirdIndex = Math.max(0, Math.floor((2 * n) / 3) - 1); + if (score >= sortedScores[topThirdIndex]) stars = chalk.yellow('★★★'); + else if (score >= sortedScores[midThirdIndex]) + stars = chalk.yellow('★★') + chalk.gray('☆'); + else stars = chalk.yellow('★') + chalk.gray('☆☆'); + } + return `${formattedPercentage} ${stars}`; +}; + +const formatCost = (costObj) => { + // ... (Implementation from previous version or refine) ... + if (!costObj) return 'N/A'; + if (costObj.input === 0 && costObj.output === 0) { + return chalk.green('Free'); + } + const formatSingleCost = (costValue) => { + if (costValue === null || costValue === undefined) return 'N/A'; + const isInteger = Number.isInteger(costValue); + return `$${costValue.toFixed(isInteger ? 0 : 2)}`; + }; + return `${formatSingleCost(costObj.input)} in, ${formatSingleCost(costObj.output)} out`; +}; + +// --- Display Functions --- + +/** + * Displays the currently configured active models. + * @param {ConfigData} configData - The active configuration data. + * @param {AvailableModel[]} allAvailableModels - Needed for SWE score tertiles. + */ +function displayModelConfiguration(configData, allAvailableModels = []) { + console.log(chalk.cyan.bold('\nActive Model Configuration:')); + const active = configData.activeModels; + const activeTable = new Table({ + head: [ + 'Role', + 'Provider', + 'Model ID', + 'SWE Score', + 'Cost ($/1M tkns)' + // 'API Key Status' // Removed, handled by separate displayApiKeyStatus + ].map((h) => chalk.cyan.bold(h)), + colWidths: [10, 14, 30, 18, 20 /*, 28 */], // Adjusted widths + style: { head: ['cyan', 'bold'] } + }); + + activeTable.push([ + chalk.white('Main'), + active.main.provider, + active.main.modelId, + formatSweScoreWithTertileStars(active.main.sweScore, allAvailableModels), + formatCost(active.main.cost) + // getCombinedStatus(active.main.keyStatus) // Removed + ]); + activeTable.push([ + chalk.white('Research'), + active.research.provider, + active.research.modelId, + formatSweScoreWithTertileStars( + active.research.sweScore, + allAvailableModels + ), + formatCost(active.research.cost) + // getCombinedStatus(active.research.keyStatus) // Removed + ]); + if (active.fallback && active.fallback.provider && active.fallback.modelId) { + activeTable.push([ + chalk.white('Fallback'), + active.fallback.provider, + active.fallback.modelId, + formatSweScoreWithTertileStars( + active.fallback.sweScore, + allAvailableModels + ), + formatCost(active.fallback.cost) + // getCombinedStatus(active.fallback.keyStatus) // Removed + ]); + } else { + activeTable.push([ + chalk.white('Fallback'), + chalk.gray('-'), + chalk.gray('(Not Set)'), + chalk.gray('-'), + chalk.gray('-') + // chalk.gray('-') // Removed + ]); + } + console.log(activeTable.toString()); +} + +/** + * Displays the list of available models not currently configured. 
+ * @param {AvailableModel[]} availableModels - List of available models. + */ +function displayAvailableModels(availableModels) { + if (!availableModels || availableModels.length === 0) { + console.log( + chalk.gray('\n(No other models available or all are configured)') + ); + return; + } + + console.log(chalk.cyan.bold('\nOther Available Models:')); + const availableTable = new Table({ + head: ['Provider', 'Model ID', 'SWE Score', 'Cost ($/1M tkns)'].map((h) => + chalk.cyan.bold(h) + ), + colWidths: [15, 40, 18, 25], + style: { head: ['cyan', 'bold'] } + }); + + availableModels.forEach((model) => { + availableTable.push([ + model.provider, + model.modelId, + formatSweScoreWithTertileStars(model.sweScore, availableModels), // Pass itself for comparison + formatCost(model.cost) + ]); + }); + console.log(availableTable.toString()); + + // --- Suggested Actions Section (moved here from models command) --- + console.log( + boxen( + chalk.white.bold('Next Steps:') + + '\n' + + chalk.cyan( + `1. Set main model: ${chalk.yellow('task-master models --set-main ')}` + ) + + '\n' + + chalk.cyan( + `2. Set research model: ${chalk.yellow('task-master models --set-research ')}` + ) + + '\n' + + chalk.cyan( + `3. Set fallback model: ${chalk.yellow('task-master models --set-fallback ')}` + ) + + '\n' + + chalk.cyan( + `4. Run interactive setup: ${chalk.yellow('task-master models --setup')}` + ), + { + padding: 1, + borderColor: 'yellow', + borderStyle: 'round', + margin: { top: 1 } + } + ) + ); +} + // Export UI functions export { displayBanner, @@ -1828,5 +2032,8 @@ export { displayTaskById, displayComplexityReport, generateComplexityAnalysisPrompt, - confirmTaskOverwrite + confirmTaskOverwrite, + displayApiKeyStatus, + displayModelConfiguration, + displayAvailableModels }; diff --git a/src/ai-providers/openai.js b/src/ai-providers/openai.js new file mode 100644 index 00000000..ce34e957 --- /dev/null +++ b/src/ai-providers/openai.js @@ -0,0 +1,176 @@ +import { createOpenAI, openai } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK +import { generateText, streamText, generateObject } from 'ai'; // Import necessary functions from 'ai' +import { log } from '../../scripts/modules/utils.js'; + +/** + * Generates text using OpenAI models via Vercel AI SDK. + * + * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature. + * @returns {Promise} The generated text content. + * @throws {Error} If API call fails. + */ +export async function generateOpenAIText(params) { + const { apiKey, modelId, messages, maxTokens, temperature } = params; + log('debug', `generateOpenAIText called with model: ${modelId}`); + + if (!apiKey) { + throw new Error('OpenAI API key is required.'); + } + if (!modelId) { + throw new Error('OpenAI Model ID is required.'); + } + if (!messages || !Array.isArray(messages) || messages.length === 0) { + throw new Error('Invalid or empty messages array provided for OpenAI.'); + } + + const openaiClient = createOpenAI({ apiKey }); + + try { + const result = await openaiClient.chat(messages, { + // Updated: Use openaiClient.chat directly + model: modelId, + max_tokens: maxTokens, + temperature + }); + + // Adjust based on actual Vercel SDK response structure for openaiClient.chat + // This might need refinement based on testing the SDK's output. 
+		const textContent = result.text?.trim();
+
+		if (!textContent) {
+			log(
+				'warn',
+				'OpenAI generateText response did not contain expected content.',
+				{ result }
+			);
+			throw new Error('Failed to extract content from OpenAI response.');
+		}
+		log(
+			'debug',
+			`OpenAI generateText completed successfully for model: ${modelId}`
+		);
+		return textContent;
+	} catch (error) {
+		log(
+			'error',
+			`Error in generateOpenAIText (Model: ${modelId}): ${error.message}`,
+			{ error }
+		);
+		throw new Error(
+			`OpenAI API error during text generation: ${error.message}`
+		);
+	}
+}
+
+/**
+ * Streams text using OpenAI models via Vercel AI SDK.
+ *
+ * @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
+ * @returns {Promise} A readable stream of text deltas.
+ * @throws {Error} If API call fails.
+ */
+export async function streamOpenAIText(params) {
+	const { apiKey, modelId, messages, maxTokens, temperature } = params;
+	log('debug', `streamOpenAIText called with model: ${modelId}`);
+
+	if (!apiKey) {
+		throw new Error('OpenAI API key is required.');
+	}
+	if (!modelId) {
+		throw new Error('OpenAI Model ID is required.');
+	}
+	if (!messages || !Array.isArray(messages) || messages.length === 0) {
+		throw new Error(
+			'Invalid or empty messages array provided for OpenAI streaming.'
+		);
+	}
+
+	const openaiClient = createOpenAI({ apiKey });
+
+	try {
+		// Use the streamText function from the Vercel AI SDK core ('ai') package
+		const stream = await streamText({
+			model: openaiClient(modelId),
+			messages,
+			maxTokens,
+			temperature
+		});
+
+		log(
+			'debug',
+			`OpenAI streamText initiated successfully for model: ${modelId}`
+		);
+		// streamText returns a result object exposing the stream (e.g. textStream) for the caller
+		return stream;
+	} catch (error) {
+		log(
+			'error',
+			`Error initiating OpenAI stream (Model: ${modelId}): ${error.message}`,
+			{ error }
+		);
+		throw new Error(
+			`OpenAI API error during streaming initiation: ${error.message}`
+		);
+	}
+}
+
+/**
+ * Generates structured objects using OpenAI models via Vercel AI SDK.
+ *
+ * @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
+ * @returns {Promise} The generated object matching the schema.
+ * @throws {Error} If API call fails or object generation fails.
+ */ +export async function generateOpenAIObject(params) { + const { + apiKey, + modelId, + messages, + schema, + objectName, + maxTokens, + temperature + } = params; + log( + 'debug', + `generateOpenAIObject called with model: ${modelId}, object: ${objectName}` + ); + + if (!apiKey) throw new Error('OpenAI API key is required.'); + if (!modelId) throw new Error('OpenAI Model ID is required.'); + if (!messages || !Array.isArray(messages) || messages.length === 0) + throw new Error('Invalid messages array for OpenAI object generation.'); + if (!schema) + throw new Error('Schema is required for OpenAI object generation.'); + if (!objectName) + throw new Error('Object name is required for OpenAI object generation.'); + + const openaiClient = createOpenAI({ apiKey }); + + try { + // Use the imported generateObject function from 'ai' package + const result = await generateObject({ + model: openaiClient(modelId), + schema: schema, + messages: messages, + mode: 'tool', + maxTokens: maxTokens, + temperature: temperature + }); + + log( + 'debug', + `OpenAI generateObject completed successfully for model: ${modelId}` + ); + return result.object; + } catch (error) { + log( + 'error', + `Error in generateOpenAIObject (Model: ${modelId}, Object: ${objectName}): ${error.message}`, + { error } + ); + throw new Error( + `OpenAI API error during object generation: ${error.message}` + ); + } +} diff --git a/tasks/task_035.txt b/tasks/task_035.txt index 6f7aca5d..0f113c51 100644 --- a/tasks/task_035.txt +++ b/tasks/task_035.txt @@ -1,6 +1,6 @@ # Task ID: 35 # Title: Integrate Grok3 API for Research Capabilities -# Status: pending +# Status: cancelled # Dependencies: None # Priority: medium # Description: Replace the current Perplexity API integration with Grok3 API for all research-related functionalities while maintaining existing feature parity. diff --git a/tasks/task_061.txt b/tasks/task_061.txt index 8a561686..d487d897 100644 --- a/tasks/task_061.txt +++ b/tasks/task_061.txt @@ -1336,12 +1336,257 @@ When testing the non-streaming `generateTextService` call in `updateSubtaskById` ### Details: -## 22. Implement `openai.js` Provider Module using Vercel AI SDK [deferred] +## 22. Implement `openai.js` Provider Module using Vercel AI SDK [in-progress] ### Dependencies: None ### Description: Create and implement the `openai.js` module within `src/ai-providers/`. This module should contain functions to interact with the OpenAI API (streaming and non-streaming) using the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`. (Optional, implement if OpenAI models are needed). 
### Details: + +```javascript +// Implementation details for openai.js provider module + +import { createOpenAI } from 'ai'; + +/** + * Generates text using OpenAI models via Vercel AI SDK + * + * @param {Object} params - Configuration parameters + * @param {string} params.apiKey - OpenAI API key + * @param {string} params.modelId - Model ID (e.g., 'gpt-4', 'gpt-3.5-turbo') + * @param {Array} params.messages - Array of message objects with role and content + * @param {number} [params.maxTokens] - Maximum tokens to generate + * @param {number} [params.temperature=0.7] - Sampling temperature (0-1) + * @returns {Promise} The generated text response + */ +export async function generateOpenAIText(params) { + try { + const { apiKey, modelId, messages, maxTokens, temperature = 0.7 } = params; + + if (!apiKey) throw new Error('OpenAI API key is required'); + if (!modelId) throw new Error('Model ID is required'); + if (!messages || !Array.isArray(messages)) throw new Error('Messages array is required'); + + const openai = createOpenAI({ apiKey }); + + const response = await openai.chat.completions.create({ + model: modelId, + messages, + max_tokens: maxTokens, + temperature, + }); + + return response.choices[0].message.content; + } catch (error) { + console.error('OpenAI text generation error:', error); + throw new Error(`OpenAI API error: ${error.message}`); + } +} + +/** + * Streams text using OpenAI models via Vercel AI SDK + * + * @param {Object} params - Configuration parameters (same as generateOpenAIText) + * @returns {ReadableStream} A stream of text chunks + */ +export async function streamOpenAIText(params) { + try { + const { apiKey, modelId, messages, maxTokens, temperature = 0.7 } = params; + + if (!apiKey) throw new Error('OpenAI API key is required'); + if (!modelId) throw new Error('Model ID is required'); + if (!messages || !Array.isArray(messages)) throw new Error('Messages array is required'); + + const openai = createOpenAI({ apiKey }); + + const stream = await openai.chat.completions.create({ + model: modelId, + messages, + max_tokens: maxTokens, + temperature, + stream: true, + }); + + return stream; + } catch (error) { + console.error('OpenAI streaming error:', error); + throw new Error(`OpenAI streaming error: ${error.message}`); + } +} + +/** + * Generates a structured object using OpenAI models via Vercel AI SDK + * + * @param {Object} params - Configuration parameters + * @param {string} params.apiKey - OpenAI API key + * @param {string} params.modelId - Model ID (e.g., 'gpt-4', 'gpt-3.5-turbo') + * @param {Array} params.messages - Array of message objects + * @param {Object} params.schema - JSON schema for the response object + * @param {string} params.objectName - Name of the object to generate + * @returns {Promise} The generated structured object + */ +export async function generateOpenAIObject(params) { + try { + const { apiKey, modelId, messages, schema, objectName } = params; + + if (!apiKey) throw new Error('OpenAI API key is required'); + if (!modelId) throw new Error('Model ID is required'); + if (!messages || !Array.isArray(messages)) throw new Error('Messages array is required'); + if (!schema) throw new Error('Schema is required'); + if (!objectName) throw new Error('Object name is required'); + + const openai = createOpenAI({ apiKey }); + + // Using the Vercel AI SDK's function calling capabilities + const response = await openai.chat.completions.create({ + model: modelId, + messages, + functions: [ + { + name: objectName, + description: `Generate a 
${objectName} object`, + parameters: schema, + }, + ], + function_call: { name: objectName }, + }); + + const functionCall = response.choices[0].message.function_call; + return JSON.parse(functionCall.arguments); + } catch (error) { + console.error('OpenAI object generation error:', error); + throw new Error(`OpenAI object generation error: ${error.message}`); + } +} +``` + + + + +```javascript +// Additional implementation notes for openai.js + +/** + * Export a provider info object for OpenAI + */ +export const providerInfo = { + id: 'openai', + name: 'OpenAI', + description: 'OpenAI API integration using Vercel AI SDK', + models: { + 'gpt-4': { + id: 'gpt-4', + name: 'GPT-4', + contextWindow: 8192, + supportsFunctions: true, + }, + 'gpt-4-turbo': { + id: 'gpt-4-turbo', + name: 'GPT-4 Turbo', + contextWindow: 128000, + supportsFunctions: true, + }, + 'gpt-3.5-turbo': { + id: 'gpt-3.5-turbo', + name: 'GPT-3.5 Turbo', + contextWindow: 16385, + supportsFunctions: true, + } + } +}; + +/** + * Helper function to format error responses consistently + * + * @param {Error} error - The caught error + * @param {string} operation - The operation being performed + * @returns {Error} A formatted error + */ +function formatError(error, operation) { + // Extract OpenAI specific error details if available + const statusCode = error.status || error.statusCode; + const errorType = error.type || error.code || 'unknown_error'; + + // Create a more detailed error message + const message = `OpenAI ${operation} error (${errorType}): ${error.message}`; + + // Create a new error with the formatted message + const formattedError = new Error(message); + + // Add additional properties for debugging + formattedError.originalError = error; + formattedError.provider = 'openai'; + formattedError.statusCode = statusCode; + formattedError.errorType = errorType; + + return formattedError; +} + +/** + * Example usage with the unified AI services interface: + * + * // In ai-services-unified.js + * import * as openaiProvider from './ai-providers/openai.js'; + * + * export async function generateText(params) { + * switch(params.provider) { + * case 'openai': + * return openaiProvider.generateOpenAIText(params); + * // other providers... + * } + * } + */ + +// Note: For proper error handling with the Vercel AI SDK, you may need to: +// 1. Check for rate limiting errors (429) +// 2. Handle token context window exceeded errors +// 3. Implement exponential backoff for retries on 5xx errors +// 4. Parse streaming errors properly from the ReadableStream +``` + + + + +```javascript +// Correction for openai.js provider module + +// IMPORTANT: Use the correct import from Vercel AI SDK +import { createOpenAI, openai } from '@ai-sdk/openai'; + +// Note: Before using this module, install the required dependency: +// npm install @ai-sdk/openai + +// The rest of the implementation remains the same, but uses the correct imports. +// When implementing this module, ensure your package.json includes this dependency. 
+ +// For streaming implementations with the Vercel AI SDK, you can also use the +// streamText and experimental streamUI methods: + +/** + * Example of using streamText for simpler streaming implementation + */ +export async function streamOpenAITextSimplified(params) { + try { + const { apiKey, modelId, messages, maxTokens, temperature = 0.7 } = params; + + if (!apiKey) throw new Error('OpenAI API key is required'); + + const openaiClient = createOpenAI({ apiKey }); + + return openaiClient.streamText({ + model: modelId, + messages, + temperature, + maxTokens, + }); + } catch (error) { + console.error('OpenAI streaming error:', error); + throw new Error(`OpenAI streaming error: ${error.message}`); + } +} +``` + + ## 23. Implement Conditional Provider Logic in `ai-services-unified.js` [done] ### Dependencies: 61.20,61.21,61.22,61.24,61.25,61.26,61.27,61.28,61.29,61.30,61.34 ### Description: Implement logic within the functions of `ai-services-unified.js` (e.g., `generateTextService`, `generateObjectService`, `streamChatService`) to dynamically select and call the appropriate provider module (`anthropic.js`, `perplexity.js`, etc.) based on configuration (e.g., environment variables like `AI_PROVIDER` and `AI_MODEL` from `process.env` or `session.env`). @@ -1425,7 +1670,7 @@ function checkProviderCapability(provider, capability) { ``` -## 24. Implement `google.js` Provider Module using Vercel AI SDK [pending] +## 24. Implement `google.js` Provider Module using Vercel AI SDK [done] ### Dependencies: None ### Description: Create and implement the `google.js` module within `src/ai-providers/`. This module should contain functions to interact with Google AI models (e.g., Gemini) using the **Vercel AI SDK (`@ai-sdk/google`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`. ### Details: diff --git a/tasks/task_070.txt b/tasks/task_070.txt new file mode 100644 index 00000000..c93d7960 --- /dev/null +++ b/tasks/task_070.txt @@ -0,0 +1,11 @@ +# Task ID: 70 +# Title: Implement 'diagram' command for Mermaid diagram generation +# Status: pending +# Dependencies: None +# Priority: medium +# Description: Develop a CLI command named 'diagram' that generates Mermaid diagrams to visualize task dependencies and workflows, with options to target specific tasks or generate comprehensive diagrams for all tasks. +# Details: +The task involves implementing a new command that accepts an optional '--id' parameter: if provided, the command generates a diagram illustrating the chosen task and its dependencies; if omitted, it produces a diagram that includes all tasks. The diagrams should use color coding to reflect task status and arrows to denote dependencies. In addition to CLI rendering, the command should offer an option to save the output as a Markdown (.md) file. Consider integrating with the existing task management system to pull task details and status. Pay attention to formatting consistency and error handling for invalid or missing task IDs. Comments should be added to the code to improve maintainability, and unit tests should cover edge cases such as cyclic dependencies, missing tasks, and invalid input formats. + +# Test Strategy: +Verify the command functionality by testing with both specific task IDs and general invocation: 1) Run the command with a valid '--id' and ensure the resulting diagram accurately depicts the specified task's dependencies with correct color codings for statuses. 
2) Execute the command without '--id' to ensure a complete workflow diagram is generated for all tasks. 3) Check that arrows correctly represent dependency relationships. 4) Validate the Markdown (.md) file export option by confirming the file format and content after saving. 5) Test error responses for non-existent task IDs and malformed inputs. diff --git a/tasks/task_071.txt b/tasks/task_071.txt new file mode 100644 index 00000000..557ee5df --- /dev/null +++ b/tasks/task_071.txt @@ -0,0 +1,23 @@ +# Task ID: 71 +# Title: Add Model-Specific maxTokens Override Configuration +# Status: pending +# Dependencies: None +# Priority: high +# Description: Implement functionality to allow specifying a maximum token limit for individual AI models within .taskmasterconfig, overriding the role-based maxTokens if the model-specific limit is lower. +# Details: +1. **Modify `.taskmasterconfig` Structure:** Add a new top-level section `modelOverrides` (e.g., `"modelOverrides": { "o3-mini": { "maxTokens": 100000 } }`). +2. **Update `config-manager.js`:** + - Modify config loading to read the new `modelOverrides` section. + - Update `getParametersForRole(role)` logic: Fetch role defaults (roleMaxTokens, temperature). Get the modelId for the role. Look up `modelOverrides[modelId].maxTokens` (modelSpecificMaxTokens). Calculate `effectiveMaxTokens = Math.min(roleMaxTokens, modelSpecificMaxTokens ?? Infinity)`. Return `{ maxTokens: effectiveMaxTokens, temperature }`. +3. **Update Documentation:** Add an example of `modelOverrides` to `.taskmasterconfig.example` or relevant documentation. + +# Test Strategy: +1. **Unit Tests (`config-manager.js`):** + - Verify `getParametersForRole` returns role defaults when no override exists. + - Verify `getParametersForRole` returns the lower model-specific limit when an override exists and is lower. + - Verify `getParametersForRole` returns the role limit when an override exists but is higher. + - Verify handling of missing `modelOverrides` section. +2. **Integration Tests (`ai-services-unified.js`):** + - Call an AI service (e.g., `generateTextService`) with a config having a model override. + - Mock the underlying provider function. + - Assert that the `maxTokens` value passed to the mocked provider function matches the expected (potentially overridden) minimum value. diff --git a/tasks/tasks.json b/tasks/tasks.json index ddaf717e..0f3b1a57 100644 --- a/tasks/tasks.json +++ b/tasks/tasks.json @@ -2288,7 +2288,7 @@ "id": 35, "title": "Integrate Grok3 API for Research Capabilities", "description": "Replace the current Perplexity API integration with Grok3 API for all research-related functionalities while maintaining existing feature parity.", - "status": "pending", + "status": "cancelled", "dependencies": [], "priority": "medium", "details": "This task involves migrating from Perplexity to Grok3 API for research capabilities throughout the application. Implementation steps include:\n\n1. Create a new API client module for Grok3 in `src/api/grok3.ts` that handles authentication, request formatting, and response parsing\n2. Update the research service layer to use the new Grok3 client instead of Perplexity\n3. Modify the request payload structure to match Grok3's expected format (parameters like temperature, max_tokens, etc.)\n4. Update response handling to properly parse and extract Grok3's response format\n5. Implement proper error handling for Grok3-specific error codes and messages\n6. 
Update environment variables and configuration files to include Grok3 API keys and endpoints\n7. Ensure rate limiting and quota management are properly implemented according to Grok3's specifications\n8. Update any UI components that display research provider information to show Grok3 instead of Perplexity\n9. Maintain backward compatibility for any stored research results from Perplexity\n10. Document the new API integration in the developer documentation\n\nGrok3 API has different parameter requirements and response formats compared to Perplexity, so careful attention must be paid to these differences during implementation.", @@ -3231,8 +3231,8 @@ "id": 22, "title": "Implement `openai.js` Provider Module using Vercel AI SDK", "description": "Create and implement the `openai.js` module within `src/ai-providers/`. This module should contain functions to interact with the OpenAI API (streaming and non-streaming) using the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`. (Optional, implement if OpenAI models are needed).", - "details": "", - "status": "deferred", + "details": "\n\n\n```javascript\n// Implementation details for openai.js provider module\n\nimport { createOpenAI } from 'ai';\n\n/**\n * Generates text using OpenAI models via Vercel AI SDK\n * \n * @param {Object} params - Configuration parameters\n * @param {string} params.apiKey - OpenAI API key\n * @param {string} params.modelId - Model ID (e.g., 'gpt-4', 'gpt-3.5-turbo')\n * @param {Array} params.messages - Array of message objects with role and content\n * @param {number} [params.maxTokens] - Maximum tokens to generate\n * @param {number} [params.temperature=0.7] - Sampling temperature (0-1)\n * @returns {Promise} The generated text response\n */\nexport async function generateOpenAIText(params) {\n try {\n const { apiKey, modelId, messages, maxTokens, temperature = 0.7 } = params;\n \n if (!apiKey) throw new Error('OpenAI API key is required');\n if (!modelId) throw new Error('Model ID is required');\n if (!messages || !Array.isArray(messages)) throw new Error('Messages array is required');\n \n const openai = createOpenAI({ apiKey });\n \n const response = await openai.chat.completions.create({\n model: modelId,\n messages,\n max_tokens: maxTokens,\n temperature,\n });\n \n return response.choices[0].message.content;\n } catch (error) {\n console.error('OpenAI text generation error:', error);\n throw new Error(`OpenAI API error: ${error.message}`);\n }\n}\n\n/**\n * Streams text using OpenAI models via Vercel AI SDK\n * \n * @param {Object} params - Configuration parameters (same as generateOpenAIText)\n * @returns {ReadableStream} A stream of text chunks\n */\nexport async function streamOpenAIText(params) {\n try {\n const { apiKey, modelId, messages, maxTokens, temperature = 0.7 } = params;\n \n if (!apiKey) throw new Error('OpenAI API key is required');\n if (!modelId) throw new Error('Model ID is required');\n if (!messages || !Array.isArray(messages)) throw new Error('Messages array is required');\n \n const openai = createOpenAI({ apiKey });\n \n const stream = await openai.chat.completions.create({\n model: modelId,\n messages,\n max_tokens: maxTokens,\n temperature,\n stream: true,\n });\n \n return stream;\n } catch (error) {\n console.error('OpenAI streaming error:', error);\n throw new Error(`OpenAI streaming error: ${error.message}`);\n }\n}\n\n/**\n * Generates a structured object using OpenAI models via Vercel AI SDK\n * \n * @param {Object} params - 
Configuration parameters\n * @param {string} params.apiKey - OpenAI API key\n * @param {string} params.modelId - Model ID (e.g., 'gpt-4', 'gpt-3.5-turbo')\n * @param {Array} params.messages - Array of message objects\n * @param {Object} params.schema - JSON schema for the response object\n * @param {string} params.objectName - Name of the object to generate\n * @returns {Promise} The generated structured object\n */\nexport async function generateOpenAIObject(params) {\n try {\n const { apiKey, modelId, messages, schema, objectName } = params;\n \n if (!apiKey) throw new Error('OpenAI API key is required');\n if (!modelId) throw new Error('Model ID is required');\n if (!messages || !Array.isArray(messages)) throw new Error('Messages array is required');\n if (!schema) throw new Error('Schema is required');\n if (!objectName) throw new Error('Object name is required');\n \n const openai = createOpenAI({ apiKey });\n \n // Using the Vercel AI SDK's function calling capabilities\n const response = await openai.chat.completions.create({\n model: modelId,\n messages,\n functions: [\n {\n name: objectName,\n description: `Generate a ${objectName} object`,\n parameters: schema,\n },\n ],\n function_call: { name: objectName },\n });\n \n const functionCall = response.choices[0].message.function_call;\n return JSON.parse(functionCall.arguments);\n } catch (error) {\n console.error('OpenAI object generation error:', error);\n throw new Error(`OpenAI object generation error: ${error.message}`);\n }\n}\n```\n\n\n\n\n```javascript\n// Additional implementation notes for openai.js\n\n/**\n * Export a provider info object for OpenAI\n */\nexport const providerInfo = {\n id: 'openai',\n name: 'OpenAI',\n description: 'OpenAI API integration using Vercel AI SDK',\n models: {\n 'gpt-4': {\n id: 'gpt-4',\n name: 'GPT-4',\n contextWindow: 8192,\n supportsFunctions: true,\n },\n 'gpt-4-turbo': {\n id: 'gpt-4-turbo',\n name: 'GPT-4 Turbo',\n contextWindow: 128000,\n supportsFunctions: true,\n },\n 'gpt-3.5-turbo': {\n id: 'gpt-3.5-turbo',\n name: 'GPT-3.5 Turbo',\n contextWindow: 16385,\n supportsFunctions: true,\n }\n }\n};\n\n/**\n * Helper function to format error responses consistently\n * \n * @param {Error} error - The caught error\n * @param {string} operation - The operation being performed\n * @returns {Error} A formatted error\n */\nfunction formatError(error, operation) {\n // Extract OpenAI specific error details if available\n const statusCode = error.status || error.statusCode;\n const errorType = error.type || error.code || 'unknown_error';\n \n // Create a more detailed error message\n const message = `OpenAI ${operation} error (${errorType}): ${error.message}`;\n \n // Create a new error with the formatted message\n const formattedError = new Error(message);\n \n // Add additional properties for debugging\n formattedError.originalError = error;\n formattedError.provider = 'openai';\n formattedError.statusCode = statusCode;\n formattedError.errorType = errorType;\n \n return formattedError;\n}\n\n/**\n * Example usage with the unified AI services interface:\n * \n * // In ai-services-unified.js\n * import * as openaiProvider from './ai-providers/openai.js';\n * \n * export async function generateText(params) {\n * switch(params.provider) {\n * case 'openai':\n * return openaiProvider.generateOpenAIText(params);\n * // other providers...\n * }\n * }\n */\n\n// Note: For proper error handling with the Vercel AI SDK, you may need to:\n// 1. Check for rate limiting errors (429)\n// 2. 
Handle token context window exceeded errors\n// 3. Implement exponential backoff for retries on 5xx errors\n// 4. Parse streaming errors properly from the ReadableStream\n```\n\n\n\n\n```javascript\n// Correction for openai.js provider module\n\n// IMPORTANT: Use the correct import from Vercel AI SDK\nimport { createOpenAI, openai } from '@ai-sdk/openai';\n\n// Note: Before using this module, install the required dependency:\n// npm install @ai-sdk/openai\n\n// The rest of the implementation remains the same, but uses the correct imports.\n// When implementing this module, ensure your package.json includes this dependency.\n\n// For streaming implementations with the Vercel AI SDK, you can also use the \n// streamText and experimental streamUI methods:\n\n/**\n * Example of using streamText for simpler streaming implementation\n */\nexport async function streamOpenAITextSimplified(params) {\n try {\n const { apiKey, modelId, messages, maxTokens, temperature = 0.7 } = params;\n \n if (!apiKey) throw new Error('OpenAI API key is required');\n \n const openaiClient = createOpenAI({ apiKey });\n \n return openaiClient.streamText({\n model: modelId,\n messages,\n temperature,\n maxTokens,\n });\n } catch (error) {\n console.error('OpenAI streaming error:', error);\n throw new Error(`OpenAI streaming error: ${error.message}`);\n }\n}\n```\n", + "status": "in-progress", "dependencies": [], "parentTaskId": 61 }, @@ -3252,7 +3252,7 @@ "title": "Implement `google.js` Provider Module using Vercel AI SDK", "description": "Create and implement the `google.js` module within `src/ai-providers/`. This module should contain functions to interact with Google AI models (e.g., Gemini) using the **Vercel AI SDK (`@ai-sdk/google`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.", "details": "\n\n\n```javascript\n// Implementation details for google.js provider module\n\n// 1. Required imports\nimport { GoogleGenerativeAI } from \"@ai-sdk/google\";\nimport { streamText, generateText, generateObject } from \"@ai-sdk/core\";\n\n// 2. Model configuration\nconst DEFAULT_MODEL = \"gemini-1.5-pro\"; // Default model, can be overridden\nconst TEMPERATURE_DEFAULT = 0.7;\n\n// 3. 
Function implementations\nexport async function generateGoogleText({ \n prompt, \n model = DEFAULT_MODEL, \n temperature = TEMPERATURE_DEFAULT,\n apiKey \n}) {\n if (!apiKey) throw new Error(\"Google API key is required\");\n \n const googleAI = new GoogleGenerativeAI(apiKey);\n const googleModel = googleAI.getGenerativeModel({ model });\n \n const result = await generateText({\n model: googleModel,\n prompt,\n temperature\n });\n \n return result;\n}\n\nexport async function streamGoogleText({ \n prompt, \n model = DEFAULT_MODEL, \n temperature = TEMPERATURE_DEFAULT,\n apiKey \n}) {\n if (!apiKey) throw new Error(\"Google API key is required\");\n \n const googleAI = new GoogleGenerativeAI(apiKey);\n const googleModel = googleAI.getGenerativeModel({ model });\n \n const stream = await streamText({\n model: googleModel,\n prompt,\n temperature\n });\n \n return stream;\n}\n\nexport async function generateGoogleObject({ \n prompt, \n schema,\n model = DEFAULT_MODEL, \n temperature = TEMPERATURE_DEFAULT,\n apiKey \n}) {\n if (!apiKey) throw new Error(\"Google API key is required\");\n \n const googleAI = new GoogleGenerativeAI(apiKey);\n const googleModel = googleAI.getGenerativeModel({ model });\n \n const result = await generateObject({\n model: googleModel,\n prompt,\n schema,\n temperature\n });\n \n return result;\n}\n\n// 4. Environment variable setup in .env.local\n// GOOGLE_API_KEY=your_google_api_key_here\n\n// 5. Error handling considerations\n// - Implement proper error handling for API rate limits\n// - Add retries for transient failures\n// - Consider adding logging for debugging purposes\n```\n", - "status": "pending", + "status": "done", "dependencies": [], "parentTaskId": 61 }, @@ -3876,6 +3876,28 @@ "dependencies": [], "priority": "medium", "subtasks": [] + }, + { + "id": 70, + "title": "Implement 'diagram' command for Mermaid diagram generation", + "description": "Develop a CLI command named 'diagram' that generates Mermaid diagrams to visualize task dependencies and workflows, with options to target specific tasks or generate comprehensive diagrams for all tasks.", + "details": "The task involves implementing a new command that accepts an optional '--id' parameter: if provided, the command generates a diagram illustrating the chosen task and its dependencies; if omitted, it produces a diagram that includes all tasks. The diagrams should use color coding to reflect task status and arrows to denote dependencies. In addition to CLI rendering, the command should offer an option to save the output as a Markdown (.md) file. Consider integrating with the existing task management system to pull task details and status. Pay attention to formatting consistency and error handling for invalid or missing task IDs. Comments should be added to the code to improve maintainability, and unit tests should cover edge cases such as cyclic dependencies, missing tasks, and invalid input formats.", + "testStrategy": "Verify the command functionality by testing with both specific task IDs and general invocation: 1) Run the command with a valid '--id' and ensure the resulting diagram accurately depicts the specified task's dependencies with correct color codings for statuses. 2) Execute the command without '--id' to ensure a complete workflow diagram is generated for all tasks. 3) Check that arrows correctly represent dependency relationships. 4) Validate the Markdown (.md) file export option by confirming the file format and content after saving. 
5) Test error responses for non-existent task IDs and malformed inputs.", + "status": "pending", + "dependencies": [], + "priority": "medium", + "subtasks": [] + }, + { + "id": 71, + "title": "Add Model-Specific maxTokens Override Configuration", + "description": "Implement functionality to allow specifying a maximum token limit for individual AI models within .taskmasterconfig, overriding the role-based maxTokens if the model-specific limit is lower.", + "details": "1. **Modify `.taskmasterconfig` Structure:** Add a new top-level section `modelOverrides` (e.g., `\"modelOverrides\": { \"o3-mini\": { \"maxTokens\": 100000 } }`).\n2. **Update `config-manager.js`:**\n - Modify config loading to read the new `modelOverrides` section.\n - Update `getParametersForRole(role)` logic: Fetch role defaults (roleMaxTokens, temperature). Get the modelId for the role. Look up `modelOverrides[modelId].maxTokens` (modelSpecificMaxTokens). Calculate `effectiveMaxTokens = Math.min(roleMaxTokens, modelSpecificMaxTokens ?? Infinity)`. Return `{ maxTokens: effectiveMaxTokens, temperature }`.\n3. **Update Documentation:** Add an example of `modelOverrides` to `.taskmasterconfig.example` or relevant documentation.", + "testStrategy": "1. **Unit Tests (`config-manager.js`):**\n - Verify `getParametersForRole` returns role defaults when no override exists.\n - Verify `getParametersForRole` returns the lower model-specific limit when an override exists and is lower.\n - Verify `getParametersForRole` returns the role limit when an override exists but is higher.\n - Verify handling of missing `modelOverrides` section.\n2. **Integration Tests (`ai-services-unified.js`):**\n - Call an AI service (e.g., `generateTextService`) with a config having a model override.\n - Mock the underlying provider function.\n - Assert that the `maxTokens` value passed to the mocked provider function matches the expected (potentially overridden) minimum value.", + "status": "pending", + "dependencies": [], + "priority": "high", + "subtasks": [] } ] } \ No newline at end of file
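
Two illustrative sketches follow for the new tasks added above. They are not part of this changeset; any names or shapes beyond what the task descriptions state are assumptions.

Task 71 describes capping a role's `maxTokens` with a model-specific override from a proposed `modelOverrides` section. A minimal sketch of that `getParametersForRole` logic (not the actual `config-manager.js` implementation; the `exampleConfig` shape is assumed from `.taskmasterconfig` plus the task's own example):

```javascript
// Minimal sketch of the override resolution described in task 71.
const exampleConfig = {
	models: {
		main: { provider: 'openai', modelId: 'o3-mini', maxTokens: 120000, temperature: 0.2 }
	},
	modelOverrides: {
		'o3-mini': { maxTokens: 100000 } // model-specific cap, lower than the role default
	}
};

function getParametersForRole(role, config = exampleConfig) {
	const roleConfig = config.models?.[role];
	if (!roleConfig) throw new Error(`Unknown role: ${role}`);

	const { modelId, maxTokens: roleMaxTokens, temperature } = roleConfig;
	const modelSpecificMaxTokens = config.modelOverrides?.[modelId]?.maxTokens;

	// Math.min with Infinity keeps the role default when no override exists,
	// and never raises the limit when the override is higher.
	const effectiveMaxTokens = Math.min(
		roleMaxTokens,
		modelSpecificMaxTokens ?? Infinity
	);

	return { maxTokens: effectiveMaxTokens, temperature };
}

// getParametersForRole('main') -> { maxTokens: 100000, temperature: 0.2 }
```

Task 70 describes a `diagram` command that renders tasks as a Mermaid graph, coloring nodes by status and drawing arrows for dependencies. A minimal sketch assuming `tasks.json`-style fields (`id`, `title`, `status`, `dependencies`); the color mapping and node naming are illustrative, and `--id` filtering, file export, and cycle handling are left out:

```javascript
// Minimal sketch of Mermaid generation for task 70.
const STATUS_FILL = {
	done: '#2ecc71',
	'in-progress': '#f1c40f',
	pending: '#bdc3c7',
	cancelled: '#e74c3c'
};

function buildMermaidDiagram(tasks) {
	const lines = ['graph TD'];
	for (const task of tasks) {
		// One node per task, colored by status.
		lines.push(`  T${task.id}["#${task.id}: ${task.title}"]`);
		lines.push(`  style T${task.id} fill:${STATUS_FILL[task.status] ?? '#ffffff'}`);
		// One arrow per dependency, from prerequisite to dependent task.
		for (const dep of task.dependencies ?? []) {
			lines.push(`  T${dep} --> T${task.id}`);
		}
	}
	return lines.join('\n');
}
```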