diff --git a/.changeset/shaky-buckets-show.md b/.changeset/shaky-buckets-show.md
new file mode 100644
index 00000000..133da3bc
--- /dev/null
+++ b/.changeset/shaky-buckets-show.md
@@ -0,0 +1,5 @@
+---
+"task-master-ai": patch
+---
+
+Fix external providers (bedrock, azure, vertex)
diff --git a/mcp-server/src/core/direct-functions/models.js b/mcp-server/src/core/direct-functions/models.js
index aa0dcff2..39e1b7a6 100644
--- a/mcp-server/src/core/direct-functions/models.js
+++ b/mcp-server/src/core/direct-functions/models.js
@@ -14,6 +14,37 @@ import {
 } from '../../../../scripts/modules/utils.js';
 import { createLogWrapper } from '../../tools/utils.js';
 
+// Define supported roles for model setting
+const MODEL_ROLES = ['main', 'research', 'fallback'];
+
+/**
+ * Handle setting models for different roles
+ * @param {Object} args - Arguments containing role-specific model IDs
+ * @param {Object} context - Context object with session, mcpLog, projectRoot
+ * @returns {Object|null} Result if a model was set, null if no model setting was requested
+ */
+async function handleModelSetting(args, context) {
+	for (const role of MODEL_ROLES) {
+		const roleKey = `set${role.charAt(0).toUpperCase() + role.slice(1)}`; // setMain, setResearch, setFallback
+
+		if (args[roleKey]) {
+			// Determine provider hint from custom provider flags
+			let providerHint = undefined;
+			if (args.openrouter) providerHint = 'openrouter';
+			else if (args.ollama) providerHint = 'ollama';
+			else if (args.bedrock) providerHint = 'bedrock';
+			else if (args.azure) providerHint = 'azure';
+			else if (args.vertex) providerHint = 'vertex';
+
+			return await setModel(role, args[roleKey], {
+				...context,
+				providerHint
+			});
+		}
+	}
+	return null; // No model setting was requested
+}
+
 /**
  * Get or update model configuration
  * @param {Object} args - Arguments passed by the MCP tool
@@ -31,16 +62,25 @@ export async function modelsDirect(args, log, context = {}) {
 	log.info(`Executing models_direct with args: ${JSON.stringify(args)}`);
 	log.info(`Using project root: ${projectRoot}`);
 
-	// Validate flags: cannot use both openrouter and ollama simultaneously
-	if (args.openrouter && args.ollama) {
+	// Validate flags: only one custom provider flag can be used simultaneously
+	const customProviderFlags = [
+		args.openrouter,
+		args.ollama,
+		args.bedrock,
+		args.azure,
+		args.vertex
+	].filter(Boolean);
+
+	if (customProviderFlags.length > 1) {
 		log.error(
-			'Error: Cannot use both openrouter and ollama flags simultaneously.'
+			'Error: Cannot use multiple custom provider flags simultaneously.'
 		);
 		return {
 			success: false,
 			error: {
 				code: 'INVALID_ARGS',
-				message: 'Cannot use both openrouter and ollama flags simultaneously.'
+				message:
+					'Cannot use multiple custom provider flags simultaneously. Choose only one: openrouter, ollama, bedrock, azure, or vertex.'
 			}
 		};
 	}
@@ -54,55 +94,22 @@ export async function modelsDirect(args, log, context = {}) {
 			return await getAvailableModelsList({
 				session,
 				mcpLog,
-				projectRoot // Pass projectRoot to function
+				projectRoot
 			});
 		}
 
-		// Handle setting a specific model
-		if (args.setMain) {
-			return await setModel('main', args.setMain, {
-				session,
-				mcpLog,
-				projectRoot, // Pass projectRoot to function
-				providerHint: args.openrouter
-					? 'openrouter'
-					: args.ollama
-						? 'ollama'
-						: undefined // Pass hint
-			});
-		}
-
-		if (args.setResearch) {
-			return await setModel('research', args.setResearch, {
-				session,
-				mcpLog,
-				projectRoot, // Pass projectRoot to function
-				providerHint: args.openrouter
-					? 'openrouter'
-					: args.ollama
-						? 'ollama'
-						: undefined // Pass hint
-			});
-		}
-
-		if (args.setFallback) {
-			return await setModel('fallback', args.setFallback, {
-				session,
-				mcpLog,
-				projectRoot, // Pass projectRoot to function
-				providerHint: args.openrouter
-					? 'openrouter'
-					: args.ollama
-						? 'ollama'
-						: undefined // Pass hint
-			});
+		// Handle setting any model role using unified function
+		const modelContext = { session, mcpLog, projectRoot };
+		const modelSetResult = await handleModelSetting(args, modelContext);
+		if (modelSetResult) {
+			return modelSetResult;
 		}
 
 		// Default action: get current configuration
 		return await getModelConfiguration({
 			session,
 			mcpLog,
-			projectRoot // Pass projectRoot to function
+			projectRoot
 		});
 	} finally {
 		disableSilentMode();
diff --git a/mcp-server/src/tools/models.js b/mcp-server/src/tools/models.js
index ef2ba24f..e38ff308 100644
--- a/mcp-server/src/tools/models.js
+++ b/mcp-server/src/tools/models.js
@@ -55,7 +55,21 @@ export function registerModelsTool(server) {
 			ollama: z
 				.boolean()
 				.optional()
-				.describe('Indicates the set model ID is a custom Ollama model.')
+				.describe('Indicates the set model ID is a custom Ollama model.'),
+			bedrock: z
+				.boolean()
+				.optional()
+				.describe('Indicates the set model ID is a custom AWS Bedrock model.'),
+			azure: z
+				.boolean()
+				.optional()
+				.describe('Indicates the set model ID is a custom Azure OpenAI model.'),
+			vertex: z
+				.boolean()
+				.optional()
+				.describe(
+					'Indicates the set model ID is a custom Google Vertex AI model.'
+				)
 		}),
 		execute: withNormalizedProjectRoot(async (args, { log, session }) => {
 			try {
diff --git a/scripts/modules/commands.js b/scripts/modules/commands.js
index 5000ac78..855b6c55 100644
--- a/scripts/modules/commands.js
+++ b/scripts/modules/commands.js
@@ -294,30 +294,14 @@ async function runInteractiveSetup(projectRoot) {
 			}
 		: null;
 
-	const customOpenRouterOption = {
-		name: '* Custom OpenRouter model', // Symbol updated
-		value: '__CUSTOM_OPENROUTER__'
-	};
-
-	const customOllamaOption = {
-		name: '* Custom Ollama model', // Symbol updated
-		value: '__CUSTOM_OLLAMA__'
-	};
-
-	const customBedrockOption = {
-		name: '* Custom Bedrock model', // Add Bedrock custom option
-		value: '__CUSTOM_BEDROCK__'
-	};
-
-	const customAzureOption = {
-		name: '* Custom Azure OpenAI model', // Add Azure custom option
-		value: '__CUSTOM_AZURE__'
-	};
-
-	const customVertexOption = {
-		name: '* Custom Vertex AI model', // Add Vertex custom option
-		value: '__CUSTOM_VERTEX__'
-	};
+	// Define custom provider options
+	const customProviderOptions = [
+		{ name: '* Custom OpenRouter model', value: '__CUSTOM_OPENROUTER__' },
+		{ name: '* Custom Ollama model', value: '__CUSTOM_OLLAMA__' },
+		{ name: '* Custom Bedrock model', value: '__CUSTOM_BEDROCK__' },
+		{ name: '* Custom Azure model', value: '__CUSTOM_AZURE__' },
+		{ name: '* Custom Vertex model', value: '__CUSTOM_VERTEX__' }
+	];
 
 	let choices = [];
 	let defaultIndex = 0; // Default to 'Cancel'
@@ -362,15 +346,8 @@ async function runInteractiveSetup(projectRoot) {
 		if (noChangeOption) {
 			systemOptions.push(noChangeOption);
 		}
-		systemOptions.push(cancelOption);
-
-		const customOptions = [
-			customOpenRouterOption,
-			customOllamaOption,
-			customBedrockOption,
-			customAzureOption,
-			customVertexOption
-		];
+		commonPrefix.push(cancelOption);
+		commonPrefix.push(...customProviderOptions);
 
 		const systemLength = systemOptions.length;
@@ -486,6 +463,130 @@ async function runInteractiveSetup(projectRoot) {
 	const coreOptionsSetup = { projectRoot }; // Pass root for setup actions
 
 	// Helper to handle setting a model (including custom)
+	async function handleCustomProviderSelection(provider, role) {
+		const providerConfigs = {
+			bedrock: {
+				prompt: `Enter the custom Bedrock Model ID for the ${role} role (e.g., anthropic.claude-3-sonnet-20240229-v1:0):`,
+				envVars: ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'],
+				successMessage:
+					'Custom Bedrock model will be used. No validation performed.'
+			},
+			azure: {
+				prompt: `Enter the custom Azure OpenAI deployment name for the ${role} role:`,
+				envVars: ['AZURE_OPENAI_API_KEY', 'AZURE_OPENAI_ENDPOINT'],
+				successMessage:
+					'Custom Azure model will be used. No validation performed.'
+			},
+			vertex: {
+				prompt: `Enter the custom Vertex AI model ID for the ${role} role (e.g., gemini-pro):`,
+				envVars: ['GOOGLE_APPLICATION_CREDENTIALS'],
+				successMessage:
+					'Custom Vertex model will be used. No validation performed.'
+			},
+			openrouter: {
+				prompt: `Enter the custom OpenRouter Model ID for the ${role} role:`,
+				envVars: [],
+				successMessage: '',
+				validate: true
+			},
+			ollama: {
+				prompt: `Enter the custom Ollama Model ID for the ${role} role:`,
+				envVars: [],
+				successMessage: '',
+				validate: true
+			}
+		};
+
+		const config = providerConfigs[provider];
+		if (!config) {
+			return { success: false, error: `Unknown provider: ${provider}` };
+		}
+
+		const { customId } = await inquirer.prompt([
+			{
+				type: 'input',
+				name: 'customId',
+				message: config.prompt
+			}
+		]);
+
+		if (!customId) {
+			console.log(chalk.yellow('No custom ID entered. Skipping role.'));
+			return { success: false };
+		}
+
+		// Check required environment variables
+		if (config.envVars.length > 0) {
+			const missingVars = config.envVars.filter(
+				(envVar) => !process.env[envVar]
+			);
+			if (missingVars.length > 0) {
+				console.error(
+					chalk.red(
+						`Error: Missing environment variables: ${missingVars.join(', ')}. Please set them before using custom ${provider} models.`
+					)
+				);
+				return { success: false };
+			}
+		}
+
+		// Handle validation for specific providers
+		if (provider === 'openrouter' && config.validate) {
+			const openRouterModels = await fetchOpenRouterModelsCLI();
+			if (
+				!openRouterModels ||
+				!openRouterModels.some((m) => m.id === customId)
+			) {
+				console.error(
+					chalk.red(
+						`Error: Model ID "${customId}" not found in the live OpenRouter model list. Please check the ID.`
+					)
+				);
+				return { success: false };
+			}
+		} else if (provider === 'ollama' && config.validate) {
+			const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
+			const ollamaModels = await fetchOllamaModelsCLI(ollamaBaseURL);
+			if (ollamaModels === null) {
+				console.error(
+					chalk.red(
+						`Error: Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
+					)
+				);
+				return { success: false };
+			} else if (!ollamaModels.some((m) => m.model === customId)) {
+				console.error(
+					chalk.red(
+						`Error: Model ID "${customId}" not found in the Ollama instance. Please verify the model is pulled and available.`
+					)
+				);
+				console.log(
+					chalk.yellow(
+						`You can check available models with: curl ${ollamaBaseURL}/tags`
+					)
+				);
+				return { success: false };
+			}
+		}
+
+		if (config.successMessage) {
+			console.log(
+				chalk.blue(
+					config.successMessage.replace(
+						'Custom Bedrock',
+						`Custom ${provider.charAt(0).toUpperCase() + provider.slice(1)}`
+					)
+				)
+			);
+		}
+
+		return {
+			success: true,
+			modelId: customId,
+			provider: provider
+		};
+	}
+
 	async function handleSetModel(role, selectedValue, currentModelId) {
 		if (selectedValue === '__CANCEL__') {
 			console.log(
@@ -506,180 +607,50 @@ async function runInteractiveSetup(projectRoot) {
 		let isCustomSelection = false;
 
 		if (selectedValue === '__CUSTOM_OPENROUTER__') {
-			isCustomSelection = true;
-			const { customId } = await inquirer.prompt([
-				{
-					type: 'input',
-					name: 'customId',
-					message: `Enter the custom OpenRouter Model ID for the ${role} role:`
-				}
-			]);
-			if (!customId) {
-				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
-				return true; // Continue setup, but don't set this role
-			}
-			modelIdToSet = customId;
-			providerHint = CUSTOM_PROVIDERS.OPENROUTER;
-			// Validate against live OpenRouter list
-			const openRouterModels = await fetchOpenRouterModelsCLI();
-			if (
-				!openRouterModels ||
-				!openRouterModels.some((m) => m.id === modelIdToSet)
-			) {
-				console.error(
-					chalk.red(
-						`Error: Model ID "${modelIdToSet}" not found in the live OpenRouter model list. Please check the ID.`
-					)
-				);
+			const result = await handleCustomProviderSelection('openrouter', role);
+			if (!result.success) {
 				setupSuccess = false;
-				return true; // Continue setup, but mark as failed
+				return true;
 			}
+			isCustomSelection = true;
+			modelIdToSet = result.modelId;
+			providerHint = result.provider;
 		} else if (selectedValue === '__CUSTOM_OLLAMA__') {
+			const result = await handleCustomProviderSelection('ollama', role);
+			if (!result.success) {
+				setupSuccess = false;
+				return true;
+			}
 			isCustomSelection = true;
-			const { customId } = await inquirer.prompt([
-				{
-					type: 'input',
-					name: 'customId',
-					message: `Enter the custom Ollama Model ID for the ${role} role:`
-				}
-			]);
-			if (!customId) {
-				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
-				return true; // Continue setup, but don't set this role
-			}
-			modelIdToSet = customId;
-			providerHint = CUSTOM_PROVIDERS.OLLAMA;
-			// Get the Ollama base URL from config for this role
-			const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
-			// Validate against live Ollama list
-			const ollamaModels = await fetchOllamaModelsCLI(ollamaBaseURL);
-			if (ollamaModels === null) {
-				console.error(
-					chalk.red(
-						`Error: Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
-					)
-				);
-				setupSuccess = false;
-				return true; // Continue setup, but mark as failed
-			} else if (!ollamaModels.some((m) => m.model === modelIdToSet)) {
-				console.error(
-					chalk.red(
-						`Error: Model ID "${modelIdToSet}" not found in the Ollama instance. Please verify the model is pulled and available.`
-					)
-				);
-				console.log(
-					chalk.yellow(
-						`You can check available models with: curl ${ollamaBaseURL}/tags`
-					)
-				);
-				setupSuccess = false;
-				return true; // Continue setup, but mark as failed
-			}
+			modelIdToSet = result.modelId;
+			providerHint = result.provider;
 		} else if (selectedValue === '__CUSTOM_BEDROCK__') {
-			isCustomSelection = true;
-			const { customId } = await inquirer.prompt([
-				{
-					type: 'input',
-					name: 'customId',
-					message: `Enter the custom Bedrock Model ID for the ${role} role (e.g., anthropic.claude-3-sonnet-20240229-v1:0):`
-				}
-			]);
-			if (!customId) {
-				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
-				return true; // Continue setup, but don't set this role
-			}
-			modelIdToSet = customId;
-			providerHint = CUSTOM_PROVIDERS.BEDROCK;
-
-			// Check if AWS environment variables exist
-			if (
-				!process.env.AWS_ACCESS_KEY_ID ||
-				!process.env.AWS_SECRET_ACCESS_KEY
-			) {
-				console.error(
-					chalk.red(
-						'Error: AWS_ACCESS_KEY_ID and/or AWS_SECRET_ACCESS_KEY environment variables are missing. Please set them before using custom Bedrock models.'
-					)
-				);
+			const result = await handleCustomProviderSelection('bedrock', role);
+			if (!result.success) {
 				setupSuccess = false;
-				return true; // Continue setup, but mark as failed
+				return true;
 			}
-
-			console.log(
-				chalk.blue(
-					`Custom Bedrock model "${modelIdToSet}" will be used. No validation performed.`
-				)
-			);
+			isCustomSelection = true;
+			modelIdToSet = result.modelId;
+			providerHint = result.provider;
 		} else if (selectedValue === '__CUSTOM_AZURE__') {
-			isCustomSelection = true;
-			const { customId } = await inquirer.prompt([
-				{
-					type: 'input',
-					name: 'customId',
-					message: `Enter the custom Azure OpenAI Model ID for the ${role} role (e.g., gpt-4o):`
-				}
-			]);
-			if (!customId) {
-				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
-				return true; // Continue setup, but don't set this role
-			}
-			modelIdToSet = customId;
-			providerHint = CUSTOM_PROVIDERS.AZURE;
-
-			// Check if Azure environment variables exist
-			if (
-				!process.env.AZURE_OPENAI_API_KEY ||
-				!process.env.AZURE_OPENAI_ENDPOINT
-			) {
-				console.error(
-					chalk.red(
-						'Error: AZURE_OPENAI_API_KEY and/or AZURE_OPENAI_ENDPOINT environment variables are missing. Please set them before using custom Azure models.'
-					)
-				);
+			const result = await handleCustomProviderSelection('azure', role);
+			if (!result.success) {
 				setupSuccess = false;
-				return true; // Continue setup, but mark as failed
+				return true;
 			}
-
-			console.log(
-				chalk.blue(
-					`Custom Azure OpenAI model "${modelIdToSet}" will be used. No validation performed.`
-				)
-			);
+			isCustomSelection = true;
+			modelIdToSet = result.modelId;
+			providerHint = result.provider;
 		} else if (selectedValue === '__CUSTOM_VERTEX__') {
-			isCustomSelection = true;
-			const { customId } = await inquirer.prompt([
-				{
-					type: 'input',
-					name: 'customId',
-					message: `Enter the custom Vertex AI Model ID for the ${role} role (e.g., gemini-1.5-pro-002):`
-				}
-			]);
-			if (!customId) {
-				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
-				return true; // Continue setup, but don't set this role
-			}
-			modelIdToSet = customId;
-			providerHint = CUSTOM_PROVIDERS.VERTEX;
-
-			// Check if Google/Vertex environment variables exist
-			if (
-				!process.env.GOOGLE_API_KEY &&
-				!process.env.GOOGLE_APPLICATION_CREDENTIALS
-			) {
-				console.error(
-					chalk.red(
-						'Error: Either GOOGLE_API_KEY or GOOGLE_APPLICATION_CREDENTIALS environment variable is required. Please set one before using custom Vertex models.'
-					)
-				);
+			const result = await handleCustomProviderSelection('vertex', role);
+			if (!result.success) {
 				setupSuccess = false;
-				return true; // Continue setup, but mark as failed
+				return true;
 			}
-
-			console.log(
-				chalk.blue(
-					`Custom Vertex AI model "${modelIdToSet}" will be used. No validation performed.`
-				)
-			);
+			isCustomSelection = true;
+			modelIdToSet = result.modelId;
+			providerHint = result.provider;
 		} else if (
 			selectedValue &&
 			typeof selectedValue === 'object' &&
diff --git a/scripts/modules/config-manager.js b/scripts/modules/config-manager.js
index d289e2e2..8848b1c4 100644
--- a/scripts/modules/config-manager.js
+++ b/scripts/modules/config-manager.js
@@ -4,6 +4,7 @@ import chalk from 'chalk';
 import { fileURLToPath } from 'url';
 import { log, findProjectRoot, resolveEnvVariable } from './utils.js';
 import { LEGACY_CONFIG_FILE } from '../../src/constants/paths.js';
+import { HOSTED_AI_PROVIDERS } from '../../src/constants/ai-providers.js';
 import { findConfigPath } from '../../src/utils/path-utils.js';
 import {
 	VALIDATED_PROVIDERS,
diff --git a/scripts/modules/task-manager/models.js b/scripts/modules/task-manager/models.js
index 6bfdfae8..0a12c331 100644
--- a/scripts/modules/task-manager/models.js
+++ b/scripts/modules/task-manager/models.js
@@ -525,7 +525,7 @@ async function setModel(role, modelId, options = {}) {
 			success: false,
 			error: {
 				code: 'MODEL_NOT_FOUND_NO_HINT',
-				message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter or --ollama.`
+				message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter, --ollama, --bedrock, --azure, or --vertex.`
 			}
 		};
 	}
diff --git a/src/constants/ai-providers.js b/src/constants/ai-providers.js
new file mode 100644
index 00000000..bceb4fde
--- /dev/null
+++ b/src/constants/ai-providers.js
@@ -0,0 +1,10 @@
+/**
+ * AI Provider constants for Task Master application
+ */
+
+/**
+ * Hosted AI providers that support custom model configurations.
+ * These providers are always considered valid since users can deploy
+ * custom models on these platforms.
+ */
+export const HOSTED_AI_PROVIDERS = ['bedrock', 'azure', 'vertex'];
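
For illustration, a minimal standalone sketch (not part of the patch) of the flag-to-hint resolution that the new handleModelSetting helper and the mutual-exclusion check above perform. The function name resolveRequest and the sample input are hypothetical; the role-key derivation and flag ordering mirror the code in mcp-server/src/core/direct-functions/models.js.

	// sketch.js — plain Node.js, mirrors the unified model-setting logic
	const MODEL_ROLES = ['main', 'research', 'fallback'];
	const PROVIDER_FLAGS = ['openrouter', 'ollama', 'bedrock', 'azure', 'vertex'];

	function resolveRequest(args) {
		// More than one custom provider flag is rejected (INVALID_ARGS above).
		const flags = PROVIDER_FLAGS.filter((flag) => args[flag]);
		if (flags.length > 1) throw new Error('INVALID_ARGS');
		const providerHint = flags[0]; // undefined when no flag is given

		for (const role of MODEL_ROLES) {
			// 'main' -> 'setMain', 'research' -> 'setResearch', 'fallback' -> 'setFallback'
			const roleKey = `set${role.charAt(0).toUpperCase() + role.slice(1)}`;
			if (args[roleKey]) return { role, modelId: args[roleKey], providerHint };
		}
		return null; // no model setting requested
	}

	// Hypothetical input: set the research model to a custom Vertex model.
	console.log(resolveRequest({ setResearch: 'gemini-pro', vertex: true }));
	// -> { role: 'research', modelId: 'gemini-pro', providerHint: 'vertex' }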