chore: cleanup

This commit is contained in:
Ralph Khreish
2025-06-20 15:57:57 +03:00
parent 6b6fe327d3
commit f2f42b0659
5 changed files with 192 additions and 198 deletions

View File

@@ -1,5 +0,0 @@
---
"task-master-ai": patch
---
Fix external providers (bedrock, azure, vertex)

View File

@@ -13,10 +13,20 @@ import {
disableSilentMode disableSilentMode
} from '../../../../scripts/modules/utils.js'; } from '../../../../scripts/modules/utils.js';
import { createLogWrapper } from '../../tools/utils.js'; import { createLogWrapper } from '../../tools/utils.js';
import { CUSTOM_PROVIDERS_ARRAY } from '../../../../src/constants/providers.js';
// Define supported roles for model setting // Define supported roles for model setting
const MODEL_ROLES = ['main', 'research', 'fallback']; const MODEL_ROLES = ['main', 'research', 'fallback'];
/**
 * Resolve which custom provider flag (if any) was supplied.
 * Scans the known custom providers in order and returns the first one
 * whose flag is truthy on the arguments object.
 * @param {Object} args - Arguments that may carry boolean provider flags
 * @returns {string|undefined} The matching provider name, or undefined when no flag is set
 */
function getProviderHint(args) {
	for (const provider of CUSTOM_PROVIDERS_ARRAY) {
		if (args[provider]) {
			return provider;
		}
	}
	return undefined;
}
/** /**
* Handle setting models for different roles * Handle setting models for different roles
* @param {Object} args - Arguments containing role-specific model IDs * @param {Object} args - Arguments containing role-specific model IDs
@@ -28,13 +38,7 @@ async function handleModelSetting(args, context) {
const roleKey = `set${role.charAt(0).toUpperCase() + role.slice(1)}`; // setMain, setResearch, setFallback const roleKey = `set${role.charAt(0).toUpperCase() + role.slice(1)}`; // setMain, setResearch, setFallback
if (args[roleKey]) { if (args[roleKey]) {
// Determine provider hint from custom provider flags const providerHint = getProviderHint(args);
let providerHint = undefined;
if (args.openrouter) providerHint = 'openrouter';
else if (args.ollama) providerHint = 'ollama';
else if (args.bedrock) providerHint = 'bedrock';
else if (args.azure) providerHint = 'azure';
else if (args.vertex) providerHint = 'vertex';
return await setModel(role, args[roleKey], { return await setModel(role, args[roleKey], {
...context, ...context,
@@ -63,13 +67,9 @@ export async function modelsDirect(args, log, context = {}) {
log.info(`Using project root: ${projectRoot}`); log.info(`Using project root: ${projectRoot}`);
// Validate flags: only one custom provider flag can be used simultaneously // Validate flags: only one custom provider flag can be used simultaneously
const customProviderFlags = [ const customProviderFlags = CUSTOM_PROVIDERS_ARRAY.filter(
args.openrouter, (provider) => args[provider]
args.ollama, );
args.bedrock,
args.azure,
args.vertex
].filter(Boolean);
if (customProviderFlags.length > 1) { if (customProviderFlags.length > 1) {
log.error( log.error(

View File

@@ -346,19 +346,18 @@ async function runInteractiveSetup(projectRoot) {
if (noChangeOption) { if (noChangeOption) {
systemOptions.push(noChangeOption); systemOptions.push(noChangeOption);
} }
commonPrefix.push(cancelOption); systemOptions.push(cancelOption);
commonPrefix.push(...customProviderOptions);
const systemLength = systemOptions.length; const systemLength = systemOptions.length;
if (allowNone) { if (allowNone) {
choices = [ choices = [
...systemOptions, ...systemOptions,
new inquirer.Separator('── Standard Models ──'), new inquirer.Separator('\n── Standard Models ──'),
{ name: '⚪ None (disable)', value: null }, { name: '⚪ None (disable)', value: null },
...roleChoices, ...roleChoices,
new inquirer.Separator('── Custom Providers ──'), new inquirer.Separator('\n── Custom Providers ──'),
...customOptions ...customProviderOptions
]; ];
// Adjust default index: System + Sep1 + None (+2) // Adjust default index: System + Sep1 + None (+2)
const noneOptionIndex = systemLength + 1; const noneOptionIndex = systemLength + 1;
@@ -369,10 +368,10 @@ async function runInteractiveSetup(projectRoot) {
} else { } else {
choices = [ choices = [
...systemOptions, ...systemOptions,
new inquirer.Separator('── Standard Models ──'), new inquirer.Separator('\n── Standard Models ──'),
...roleChoices, ...roleChoices,
new inquirer.Separator('── Custom Providers ──'), new inquirer.Separator('\n── Custom Providers ──'),
...customOptions ...customProviderOptions
]; ];
// Adjust default index: System + Sep (+1) // Adjust default index: System + Sep (+1)
defaultIndex = defaultIndex =
@@ -463,130 +462,6 @@ async function runInteractiveSetup(projectRoot) {
const coreOptionsSetup = { projectRoot }; // Pass root for setup actions const coreOptionsSetup = { projectRoot }; // Pass root for setup actions
// Helper to handle setting a model (including custom) // Helper to handle setting a model (including custom)
/**
 * Prompt the user for a custom model ID for the given provider/role,
 * check required environment variables, and — where supported — validate
 * the entered ID against the provider's live model list.
 *
 * @param {string} provider - One of 'bedrock' | 'azure' | 'vertex' | 'openrouter' | 'ollama'
 * @param {string} role - Model role being configured (e.g. 'main', 'research', 'fallback')
 * @returns {Promise<{success: boolean, modelId?: string, provider?: string, error?: string}>}
 *   success=false means the caller should skip/fail this role; on success,
 *   modelId and provider carry the values to persist.
 */
async function handleCustomProviderSelection(provider, role) {
	// Per-provider prompt text, required env vars, and validation behavior.
	const providerConfigs = {
		bedrock: {
			prompt: `Enter the custom Bedrock Model ID for the ${role} role (e.g., anthropic.claude-3-sonnet-20240229-v1:0):`,
			envVars: ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'],
			successMessage:
				'Custom Bedrock model will be used. No validation performed.'
		},
		azure: {
			prompt: `Enter the custom Azure OpenAI deployment name for the ${role} role:`,
			envVars: ['AZURE_OPENAI_API_KEY', 'AZURE_OPENAI_ENDPOINT'],
			successMessage:
				'Custom Azure model will be used. No validation performed.'
		},
		vertex: {
			prompt: `Enter the custom Vertex AI model ID for the ${role} role (e.g., gemini-pro):`,
			envVars: ['GOOGLE_APPLICATION_CREDENTIALS'],
			successMessage:
				'Custom Vertex model will be used. No validation performed.'
		},
		openrouter: {
			prompt: `Enter the custom OpenRouter Model ID for the ${role} role:`,
			envVars: [],
			successMessage: '',
			validate: true
		},
		ollama: {
			prompt: `Enter the custom Ollama Model ID for the ${role} role:`,
			envVars: [],
			successMessage: '',
			validate: true
		}
	};

	const config = providerConfigs[provider];
	if (!config) {
		return { success: false, error: `Unknown provider: ${provider}` };
	}

	const { customId } = await inquirer.prompt([
		{
			type: 'input',
			name: 'customId',
			message: config.prompt
		}
	]);

	// Empty input means the user chose to skip this role.
	if (!customId) {
		console.log(chalk.yellow('No custom ID entered. Skipping role.'));
		return { success: false };
	}

	// Check required environment variables before accepting the model.
	if (config.envVars.length > 0) {
		const missingVars = config.envVars.filter(
			(envVar) => !process.env[envVar]
		);
		if (missingVars.length > 0) {
			console.error(
				chalk.red(
					`Error: Missing environment variables: ${missingVars.join(', ')}. Please set them before using custom ${provider} models.`
				)
			);
			return { success: false };
		}
	}

	// Live validation for providers that expose a model list.
	if (provider === 'openrouter' && config.validate) {
		const openRouterModels = await fetchOpenRouterModelsCLI();
		if (
			!openRouterModels ||
			!openRouterModels.some((m) => m.id === customId)
		) {
			console.error(
				chalk.red(
					`Error: Model ID "${customId}" not found in the live OpenRouter model list. Please check the ID.`
				)
			);
			return { success: false };
		}
	} else if (provider === 'ollama' && config.validate) {
		// Ollama validation requires a reachable local/remote server.
		const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
		const ollamaModels = await fetchOllamaModelsCLI(ollamaBaseURL);
		if (ollamaModels === null) {
			console.error(
				chalk.red(
					`Error: Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
				)
			);
			return { success: false };
		} else if (!ollamaModels.some((m) => m.model === customId)) {
			console.error(
				chalk.red(
					`Error: Model ID "${customId}" not found in the Ollama instance. Please verify the model is pulled and available.`
				)
			);
			console.log(
				chalk.yellow(
					`You can check available models with: curl ${ollamaBaseURL}/tags`
				)
			);
			return { success: false };
		}
	}

	if (config.successMessage) {
		// Each successMessage is already provider-specific, so log it verbatim.
		// (The previous .replace('Custom Bedrock', …) call was a no-op in every
		// branch: bedrock replaced the substring with itself, and the azure/
		// vertex messages never contained it.)
		console.log(chalk.blue(config.successMessage));
	}

	return {
		success: true,
		modelId: customId,
		provider
	};
}
async function handleSetModel(role, selectedValue, currentModelId) { async function handleSetModel(role, selectedValue, currentModelId) {
if (selectedValue === '__CANCEL__') { if (selectedValue === '__CANCEL__') {
console.log( console.log(
@@ -607,50 +482,180 @@ async function runInteractiveSetup(projectRoot) {
let isCustomSelection = false; let isCustomSelection = false;
if (selectedValue === '__CUSTOM_OPENROUTER__') { if (selectedValue === '__CUSTOM_OPENROUTER__') {
const result = await handleCustomProviderSelection('openrouter', role);
if (!result.success) {
setupSuccess = false;
return true;
}
isCustomSelection = true; isCustomSelection = true;
modelIdToSet = result.modelId; const { customId } = await inquirer.prompt([
providerHint = result.provider; {
type: 'input',
name: 'customId',
message: `Enter the custom OpenRouter Model ID for the ${role} role:`
}
]);
if (!customId) {
console.log(chalk.yellow('No custom ID entered. Skipping role.'));
return true; // Continue setup, but don't set this role
}
modelIdToSet = customId;
providerHint = CUSTOM_PROVIDERS.OPENROUTER;
// Validate against live OpenRouter list
const openRouterModels = await fetchOpenRouterModelsCLI();
if (
!openRouterModels ||
!openRouterModels.some((m) => m.id === modelIdToSet)
) {
console.error(
chalk.red(
`Error: Model ID "${modelIdToSet}" not found in the live OpenRouter model list. Please check the ID.`
)
);
setupSuccess = false;
return true; // Continue setup, but mark as failed
}
} else if (selectedValue === '__CUSTOM_OLLAMA__') { } else if (selectedValue === '__CUSTOM_OLLAMA__') {
const result = await handleCustomProviderSelection('ollama', role);
if (!result.success) {
setupSuccess = false;
return true;
}
isCustomSelection = true; isCustomSelection = true;
modelIdToSet = result.modelId; const { customId } = await inquirer.prompt([
providerHint = result.provider; {
type: 'input',
name: 'customId',
message: `Enter the custom Ollama Model ID for the ${role} role:`
}
]);
if (!customId) {
console.log(chalk.yellow('No custom ID entered. Skipping role.'));
return true; // Continue setup, but don't set this role
}
modelIdToSet = customId;
providerHint = CUSTOM_PROVIDERS.OLLAMA;
// Get the Ollama base URL from config for this role
const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
// Validate against live Ollama list
const ollamaModels = await fetchOllamaModelsCLI(ollamaBaseURL);
if (ollamaModels === null) {
console.error(
chalk.red(
`Error: Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
)
);
setupSuccess = false;
return true; // Continue setup, but mark as failed
} else if (!ollamaModels.some((m) => m.model === modelIdToSet)) {
console.error(
chalk.red(
`Error: Model ID "${modelIdToSet}" not found in the Ollama instance. Please verify the model is pulled and available.`
)
);
console.log(
chalk.yellow(
`You can check available models with: curl ${ollamaBaseURL}/tags`
)
);
setupSuccess = false;
return true; // Continue setup, but mark as failed
}
} else if (selectedValue === '__CUSTOM_BEDROCK__') { } else if (selectedValue === '__CUSTOM_BEDROCK__') {
const result = await handleCustomProviderSelection('bedrock', role);
if (!result.success) {
setupSuccess = false;
return true;
}
isCustomSelection = true; isCustomSelection = true;
modelIdToSet = result.modelId; const { customId } = await inquirer.prompt([
providerHint = result.provider; {
type: 'input',
name: 'customId',
message: `Enter the custom Bedrock Model ID for the ${role} role (e.g., anthropic.claude-3-sonnet-20240229-v1:0):`
}
]);
if (!customId) {
console.log(chalk.yellow('No custom ID entered. Skipping role.'));
return true; // Continue setup, but don't set this role
}
modelIdToSet = customId;
providerHint = CUSTOM_PROVIDERS.BEDROCK;
// Check if AWS environment variables exist
if (
!process.env.AWS_ACCESS_KEY_ID ||
!process.env.AWS_SECRET_ACCESS_KEY
) {
console.error(
chalk.red(
'Error: AWS_ACCESS_KEY_ID and/or AWS_SECRET_ACCESS_KEY environment variables are missing. Please set them before using custom Bedrock models.'
)
);
setupSuccess = false;
return true; // Continue setup, but mark as failed
}
console.log(
chalk.blue(
`Custom Bedrock model "${modelIdToSet}" will be used. No validation performed.`
)
);
} else if (selectedValue === '__CUSTOM_AZURE__') { } else if (selectedValue === '__CUSTOM_AZURE__') {
const result = await handleCustomProviderSelection('azure', role);
if (!result.success) {
setupSuccess = false;
return true;
}
isCustomSelection = true; isCustomSelection = true;
modelIdToSet = result.modelId; const { customId } = await inquirer.prompt([
providerHint = result.provider; {
type: 'input',
name: 'customId',
message: `Enter the custom Azure OpenAI Model ID for the ${role} role (e.g., gpt-4o):`
}
]);
if (!customId) {
console.log(chalk.yellow('No custom ID entered. Skipping role.'));
return true; // Continue setup, but don't set this role
}
modelIdToSet = customId;
providerHint = CUSTOM_PROVIDERS.AZURE;
// Check if Azure environment variables exist
if (
!process.env.AZURE_OPENAI_API_KEY ||
!process.env.AZURE_OPENAI_ENDPOINT
) {
console.error(
chalk.red(
'Error: AZURE_OPENAI_API_KEY and/or AZURE_OPENAI_ENDPOINT environment variables are missing. Please set them before using custom Azure models.'
)
);
setupSuccess = false;
return true; // Continue setup, but mark as failed
}
console.log(
chalk.blue(
`Custom Azure OpenAI model "${modelIdToSet}" will be used. No validation performed.`
)
);
} else if (selectedValue === '__CUSTOM_VERTEX__') { } else if (selectedValue === '__CUSTOM_VERTEX__') {
const result = await handleCustomProviderSelection('vertex', role);
if (!result.success) {
setupSuccess = false;
return true;
}
isCustomSelection = true; isCustomSelection = true;
modelIdToSet = result.modelId; const { customId } = await inquirer.prompt([
providerHint = result.provider; {
type: 'input',
name: 'customId',
message: `Enter the custom Vertex AI Model ID for the ${role} role (e.g., gemini-1.5-pro-002):`
}
]);
if (!customId) {
console.log(chalk.yellow('No custom ID entered. Skipping role.'));
return true; // Continue setup, but don't set this role
}
modelIdToSet = customId;
providerHint = CUSTOM_PROVIDERS.VERTEX;
// Check if Google/Vertex environment variables exist
if (
!process.env.GOOGLE_API_KEY &&
!process.env.GOOGLE_APPLICATION_CREDENTIALS
) {
console.error(
chalk.red(
'Error: Either GOOGLE_API_KEY or GOOGLE_APPLICATION_CREDENTIALS environment variable is required. Please set one before using custom Vertex models.'
)
);
setupSuccess = false;
return true; // Continue setup, but mark as failed
}
console.log(
chalk.blue(
`Custom Vertex AI model "${modelIdToSet}" will be used. No validation performed.`
)
);
} else if ( } else if (
selectedValue && selectedValue &&
typeof selectedValue === 'object' && typeof selectedValue === 'object' &&

View File

@@ -4,7 +4,6 @@ import chalk from 'chalk';
import { fileURLToPath } from 'url'; import { fileURLToPath } from 'url';
import { log, findProjectRoot, resolveEnvVariable } from './utils.js'; import { log, findProjectRoot, resolveEnvVariable } from './utils.js';
import { LEGACY_CONFIG_FILE } from '../../src/constants/paths.js'; import { LEGACY_CONFIG_FILE } from '../../src/constants/paths.js';
import { HOSTED_AI_PROVIDERS } from '../../src/constants/ai-providers.js';
import { findConfigPath } from '../../src/utils/path-utils.js'; import { findConfigPath } from '../../src/utils/path-utils.js';
import { import {
VALIDATED_PROVIDERS, VALIDATED_PROVIDERS,
@@ -510,7 +509,8 @@ function isApiKeySet(providerName, session = null, projectRoot = null) {
azure: 'AZURE_OPENAI_API_KEY', azure: 'AZURE_OPENAI_API_KEY',
openrouter: 'OPENROUTER_API_KEY', openrouter: 'OPENROUTER_API_KEY',
xai: 'XAI_API_KEY', xai: 'XAI_API_KEY',
vertex: 'GOOGLE_API_KEY' // Vertex uses the same key as Google vertex: 'GOOGLE_API_KEY', // Vertex uses the same key as Google
bedrock: 'AWS_ACCESS_KEY_ID' // Bedrock uses AWS credentials
// Add other providers as needed // Add other providers as needed
}; };
@@ -606,6 +606,10 @@ function getMcpApiKeyStatus(providerName, projectRoot = null) {
apiKeyToCheck = mcpEnv.GOOGLE_API_KEY; // Vertex uses Google API key apiKeyToCheck = mcpEnv.GOOGLE_API_KEY; // Vertex uses Google API key
placeholderValue = 'YOUR_GOOGLE_API_KEY_HERE'; placeholderValue = 'YOUR_GOOGLE_API_KEY_HERE';
break; break;
case 'bedrock':
apiKeyToCheck = mcpEnv.AWS_ACCESS_KEY_ID; // Bedrock uses AWS credentials
placeholderValue = 'YOUR_AWS_ACCESS_KEY_ID_HERE';
break;
default: default:
return false; // Unknown provider return false; // Unknown provider
} }

View File

@@ -1,10 +0,0 @@
/**
 * AI Provider constants for Task Master application.
 */
/**
 * Hosted AI providers that support custom model configurations.
 * These providers are always considered valid since users can deploy
 * custom models on these platforms, so model IDs entered for them are
 * accepted without being checked against a static model list.
 * @type {string[]}
 */
export const HOSTED_AI_PROVIDERS = ['bedrock', 'azure', 'vertex'];