fix: Fix external provider support

.changeset/shaky-buckets-show.md (new file)
@@ -0,0 +1,5 @@
+---
+"task-master-ai": patch
+---
+
+Fix external providers (bedrock, azure, vertex)

@@ -14,6 +14,37 @@ import {
 } from '../../../../scripts/modules/utils.js';
 import { createLogWrapper } from '../../tools/utils.js';
+
+// Define supported roles for model setting
+const MODEL_ROLES = ['main', 'research', 'fallback'];
+
+/**
+ * Handle setting models for different roles
+ * @param {Object} args - Arguments containing role-specific model IDs
+ * @param {Object} context - Context object with session, mcpLog, projectRoot
+ * @returns {Object|null} Result if a model was set, null if no model setting was requested
+ */
+async function handleModelSetting(args, context) {
+	for (const role of MODEL_ROLES) {
+		const roleKey = `set${role.charAt(0).toUpperCase() + role.slice(1)}`; // setMain, setResearch, setFallback
+
+		if (args[roleKey]) {
+			// Determine provider hint from custom provider flags
+			let providerHint = undefined;
+			if (args.openrouter) providerHint = 'openrouter';
+			else if (args.ollama) providerHint = 'ollama';
+			else if (args.bedrock) providerHint = 'bedrock';
+			else if (args.azure) providerHint = 'azure';
+			else if (args.vertex) providerHint = 'vertex';
+
+			return await setModel(role, args[roleKey], {
+				...context,
+				providerHint
+			});
+		}
+	}
+	return null; // No model setting was requested
+}
+
 /**
  * Get or update model configuration
  * @param {Object} args - Arguments passed by the MCP tool
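
Note: a standalone sketch of the dispatch the new handleModelSetting helper performs. It derives the arg name (setMain, setResearch, setFallback) from each role and returns on the first match, so a single call sets at most one role. The args values below are hypothetical, not from the commit.

// Hypothetical args; runs as-is under Node.
const args = { setResearch: 'sonar-pro', vertex: true };
for (const role of ['main', 'research', 'fallback']) {
	const roleKey = `set${role.charAt(0).toUpperCase() + role.slice(1)}`;
	if (args[roleKey]) {
		console.log(`would set ${role} to ${args[roleKey]} with providerHint 'vertex'`);
		break; // the real helper returns setModel(...) here
	}
}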
@@ -31,16 +62,25 @@ export async function modelsDirect(args, log, context = {}) {
 	log.info(`Executing models_direct with args: ${JSON.stringify(args)}`);
 	log.info(`Using project root: ${projectRoot}`);
 
-	// Validate flags: cannot use both openrouter and ollama simultaneously
-	if (args.openrouter && args.ollama) {
+	// Validate flags: only one custom provider flag can be used simultaneously
+	const customProviderFlags = [
+		args.openrouter,
+		args.ollama,
+		args.bedrock,
+		args.azure,
+		args.vertex
+	].filter(Boolean);
+
+	if (customProviderFlags.length > 1) {
 		log.error(
-			'Error: Cannot use both openrouter and ollama flags simultaneously.'
+			'Error: Cannot use multiple custom provider flags simultaneously.'
 		);
 		return {
 			success: false,
 			error: {
 				code: 'INVALID_ARGS',
-				message: 'Cannot use both openrouter and ollama flags simultaneously.'
+				message:
+					'Cannot use multiple custom provider flags simultaneously. Choose only one: openrouter, ollama, bedrock, azure, or vertex.'
 			}
 		};
 	}
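
Note: the guard now counts truthy flags instead of testing the single openrouter/ollama pair, so every combination of two or more flags is rejected through one code path. A minimal standalone illustration (flag values hypothetical):

const flags = { openrouter: false, ollama: true, bedrock: true };
const selected = [flags.openrouter, flags.ollama, flags.bedrock, flags.azure, flags.vertex]
	.filter(Boolean);
console.log(selected.length > 1); // true -> INVALID_ARGS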
@@ -54,55 +94,22 @@ export async function modelsDirect(args, log, context = {}) {
 			return await getAvailableModelsList({
 				session,
 				mcpLog,
-				projectRoot // Pass projectRoot to function
+				projectRoot
 			});
 		}
 
-		// Handle setting a specific model
-		if (args.setMain) {
-			return await setModel('main', args.setMain, {
-				session,
-				mcpLog,
-				projectRoot, // Pass projectRoot to function
-				providerHint: args.openrouter
-					? 'openrouter'
-					: args.ollama
-						? 'ollama'
-						: undefined // Pass hint
-			});
-		}
-
-		if (args.setResearch) {
-			return await setModel('research', args.setResearch, {
-				session,
-				mcpLog,
-				projectRoot, // Pass projectRoot to function
-				providerHint: args.openrouter
-					? 'openrouter'
-					: args.ollama
-						? 'ollama'
-						: undefined // Pass hint
-			});
-		}
-
-		if (args.setFallback) {
-			return await setModel('fallback', args.setFallback, {
-				session,
-				mcpLog,
-				projectRoot, // Pass projectRoot to function
-				providerHint: args.openrouter
-					? 'openrouter'
-					: args.ollama
-						? 'ollama'
-						: undefined // Pass hint
-			});
+		// Handle setting any model role using unified function
+		const modelContext = { session, mcpLog, projectRoot };
+		const modelSetResult = await handleModelSetting(args, modelContext);
+		if (modelSetResult) {
+			return modelSetResult;
 		}
 
 		// Default action: get current configuration
 		return await getModelConfiguration({
 			session,
 			mcpLog,
-			projectRoot // Pass projectRoot to function
+			projectRoot
 		});
 	} finally {
 		disableSilentMode();
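
Note: the three copy-pasted setMain/setResearch/setFallback branches collapse into one helper call; when no set* arg is present the helper returns null and execution falls through to getModelConfiguration. A mocked sketch of that contract (names local to this example, not from the commit):

async function handleModelSettingMock(args) {
	return args.setMain ? { success: true, data: { role: 'main' } } : null;
}
async function demo(args) {
	const result = await handleModelSettingMock(args);
	if (result) return result;
	return { success: true, data: { action: 'current-config' } }; // default path
}
demo({}).then((r) => console.log(r.data.action)); // current-config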
@@ -55,7 +55,21 @@ export function registerModelsTool(server) {
 			ollama: z
 				.boolean()
 				.optional()
-				.describe('Indicates the set model ID is a custom Ollama model.')
+				.describe('Indicates the set model ID is a custom Ollama model.'),
+			bedrock: z
+				.boolean()
+				.optional()
+				.describe('Indicates the set model ID is a custom AWS Bedrock model.'),
+			azure: z
+				.boolean()
+				.optional()
+				.describe('Indicates the set model ID is a custom Azure OpenAI model.'),
+			vertex: z
+				.boolean()
+				.optional()
+				.describe(
+					'Indicates the set model ID is a custom Google Vertex AI model.'
+				)
 		}),
 		execute: withNormalizedProjectRoot(async (args, { log, session }) => {
 			try {
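
Note: the three new flags mirror the existing openrouter/ollama pattern, so an MCP client can mark a set model ID as a hosted custom model. A minimal sketch of just the new schema fields, assuming zod is installed (the surrounding tool schema is omitted):

import { z } from 'zod';

const customProviderFlags = z.object({
	bedrock: z.boolean().optional(),
	azure: z.boolean().optional(),
	vertex: z.boolean().optional()
});
console.log(customProviderFlags.parse({ vertex: true })); // { vertex: true }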
@@ -294,30 +294,14 @@ async function runInteractiveSetup(projectRoot) {
 			}
 		: null;
 
-	const customOpenRouterOption = {
-		name: '* Custom OpenRouter model', // Symbol updated
-		value: '__CUSTOM_OPENROUTER__'
-	};
-
-	const customOllamaOption = {
-		name: '* Custom Ollama model', // Symbol updated
-		value: '__CUSTOM_OLLAMA__'
-	};
-
-	const customBedrockOption = {
-		name: '* Custom Bedrock model', // Add Bedrock custom option
-		value: '__CUSTOM_BEDROCK__'
-	};
-
-	const customAzureOption = {
-		name: '* Custom Azure OpenAI model', // Add Azure custom option
-		value: '__CUSTOM_AZURE__'
-	};
-
-	const customVertexOption = {
-		name: '* Custom Vertex AI model', // Add Vertex custom option
-		value: '__CUSTOM_VERTEX__'
-	};
+	// Define custom provider options
+	const customProviderOptions = [
+		{ name: '* Custom OpenRouter model', value: '__CUSTOM_OPENROUTER__' },
+		{ name: '* Custom Ollama model', value: '__CUSTOM_OLLAMA__' },
+		{ name: '* Custom Bedrock model', value: '__CUSTOM_BEDROCK__' },
+		{ name: '* Custom Azure model', value: '__CUSTOM_AZURE__' },
+		{ name: '* Custom Vertex model', value: '__CUSTOM_VERTEX__' }
+	];
 
 	let choices = [];
 	let defaultIndex = 0; // Default to 'Cancel'
@@ -362,15 +346,8 @@ async function runInteractiveSetup(projectRoot) {
 			if (noChangeOption) {
 				systemOptions.push(noChangeOption);
 			}
-			systemOptions.push(cancelOption);
+			commonPrefix.push(cancelOption);
+			commonPrefix.push(...customProviderOptions);
 
-			const customOptions = [
-				customOpenRouterOption,
-				customOllamaOption,
-				customBedrockOption,
-				customAzureOption,
-				customVertexOption
-			];
-
 			const systemLength = systemOptions.length;
 
@@ -486,6 +463,130 @@ async function runInteractiveSetup(projectRoot) {
 	const coreOptionsSetup = { projectRoot }; // Pass root for setup actions
 
 	// Helper to handle setting a model (including custom)
+	async function handleCustomProviderSelection(provider, role) {
+		const providerConfigs = {
+			bedrock: {
+				prompt: `Enter the custom Bedrock Model ID for the ${role} role (e.g., anthropic.claude-3-sonnet-20240229-v1:0):`,
+				envVars: ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'],
+				successMessage:
+					'Custom Bedrock model will be used. No validation performed.'
+			},
+			azure: {
+				prompt: `Enter the custom Azure OpenAI deployment name for the ${role} role:`,
+				envVars: ['AZURE_OPENAI_API_KEY', 'AZURE_OPENAI_ENDPOINT'],
+				successMessage:
+					'Custom Azure model will be used. No validation performed.'
+			},
+			vertex: {
+				prompt: `Enter the custom Vertex AI model ID for the ${role} role (e.g., gemini-pro):`,
+				envVars: ['GOOGLE_APPLICATION_CREDENTIALS'],
+				successMessage:
+					'Custom Vertex model will be used. No validation performed.'
+			},
+			openrouter: {
+				prompt: `Enter the custom OpenRouter Model ID for the ${role} role:`,
+				envVars: [],
+				successMessage: '',
+				validate: true
+			},
+			ollama: {
+				prompt: `Enter the custom Ollama Model ID for the ${role} role:`,
+				envVars: [],
+				successMessage: '',
+				validate: true
+			}
+		};
+
+		const config = providerConfigs[provider];
+		if (!config) {
+			return { success: false, error: `Unknown provider: ${provider}` };
+		}
+
+		const { customId } = await inquirer.prompt([
+			{
+				type: 'input',
+				name: 'customId',
+				message: config.prompt
+			}
+		]);
+
+		if (!customId) {
+			console.log(chalk.yellow('No custom ID entered. Skipping role.'));
+			return { success: false };
+		}
+
+		// Check required environment variables
+		if (config.envVars.length > 0) {
+			const missingVars = config.envVars.filter(
+				(envVar) => !process.env[envVar]
+			);
+			if (missingVars.length > 0) {
+				console.error(
+					chalk.red(
+						`Error: Missing environment variables: ${missingVars.join(', ')}. Please set them before using custom ${provider} models.`
+					)
+				);
+				return { success: false };
+			}
+		}
+
+		// Handle validation for specific providers
+		if (provider === 'openrouter' && config.validate) {
+			const openRouterModels = await fetchOpenRouterModelsCLI();
+			if (
+				!openRouterModels ||
+				!openRouterModels.some((m) => m.id === customId)
+			) {
+				console.error(
+					chalk.red(
+						`Error: Model ID "${customId}" not found in the live OpenRouter model list. Please check the ID.`
+					)
+				);
+				return { success: false };
+			}
+		} else if (provider === 'ollama' && config.validate) {
+			const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
+			const ollamaModels = await fetchOllamaModelsCLI(ollamaBaseURL);
+			if (ollamaModels === null) {
+				console.error(
+					chalk.red(
+						`Error: Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
+					)
+				);
+				return { success: false };
+			} else if (!ollamaModels.some((m) => m.model === customId)) {
+				console.error(
+					chalk.red(
+						`Error: Model ID "${customId}" not found in the Ollama instance. Please verify the model is pulled and available.`
+					)
+				);
+				console.log(
+					chalk.yellow(
+						`You can check available models with: curl ${ollamaBaseURL}/tags`
+					)
+				);
+				return { success: false };
+			}
+		}
+
+		if (config.successMessage) {
+			console.log(
+				chalk.blue(
+					config.successMessage.replace(
+						'Custom Bedrock',
+						`Custom ${provider.charAt(0).toUpperCase() + provider.slice(1)}`
+					)
+				)
+			);
+		}
+
+		return {
+			success: true,
+			modelId: customId,
+			provider: provider
+		};
+	}
+
 	async function handleSetModel(role, selectedValue, currentModelId) {
 		if (selectedValue === '__CANCEL__') {
 			console.log(
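
Note: this table-driven helper replaces five near-identical prompt/validate blocks. Its result contract is consumed by handleSetModel below: { success: false } aborts the role, while success carries the entered model ID and the provider hint. The env-var gate reduces to a filter; a standalone sketch (provider and variable names as in the table above):

const envVars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'];
const missingVars = envVars.filter((v) => !process.env[v]);
console.log(
	missingVars.length > 0 ? `missing: ${missingVars.join(', ')}` : 'bedrock env OK'
);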
@@ -506,180 +607,50 @@ async function runInteractiveSetup(projectRoot) {
 		let isCustomSelection = false;
 
 		if (selectedValue === '__CUSTOM_OPENROUTER__') {
-			isCustomSelection = true;
-			const { customId } = await inquirer.prompt([
-				{
-					type: 'input',
-					name: 'customId',
-					message: `Enter the custom OpenRouter Model ID for the ${role} role:`
-				}
-			]);
-			if (!customId) {
-				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
-				return true; // Continue setup, but don't set this role
-			}
-			modelIdToSet = customId;
-			providerHint = CUSTOM_PROVIDERS.OPENROUTER;
-			// Validate against live OpenRouter list
-			const openRouterModels = await fetchOpenRouterModelsCLI();
-			if (
-				!openRouterModels ||
-				!openRouterModels.some((m) => m.id === modelIdToSet)
-			) {
-				console.error(
-					chalk.red(
-						`Error: Model ID "${modelIdToSet}" not found in the live OpenRouter model list. Please check the ID.`
-					)
-				);
+			const result = await handleCustomProviderSelection('openrouter', role);
+			if (!result.success) {
 				setupSuccess = false;
-				return true; // Continue setup, but mark as failed
+				return true;
 			}
+			isCustomSelection = true;
+			modelIdToSet = result.modelId;
+			providerHint = result.provider;
 		} else if (selectedValue === '__CUSTOM_OLLAMA__') {
+			const result = await handleCustomProviderSelection('ollama', role);
+			if (!result.success) {
+				setupSuccess = false;
+				return true;
+			}
 			isCustomSelection = true;
-			const { customId } = await inquirer.prompt([
-				{
-					type: 'input',
-					name: 'customId',
-					message: `Enter the custom Ollama Model ID for the ${role} role:`
-				}
-			]);
-			if (!customId) {
-				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
-				return true; // Continue setup, but don't set this role
-			}
-			modelIdToSet = customId;
-			providerHint = CUSTOM_PROVIDERS.OLLAMA;
-			// Get the Ollama base URL from config for this role
-			const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
-			// Validate against live Ollama list
-			const ollamaModels = await fetchOllamaModelsCLI(ollamaBaseURL);
-			if (ollamaModels === null) {
-				console.error(
-					chalk.red(
-						`Error: Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
-					)
-				);
-				setupSuccess = false;
-				return true; // Continue setup, but mark as failed
-			} else if (!ollamaModels.some((m) => m.model === modelIdToSet)) {
-				console.error(
-					chalk.red(
-						`Error: Model ID "${modelIdToSet}" not found in the Ollama instance. Please verify the model is pulled and available.`
-					)
-				);
-				console.log(
-					chalk.yellow(
-						`You can check available models with: curl ${ollamaBaseURL}/tags`
-					)
-				);
-				setupSuccess = false;
-				return true; // Continue setup, but mark as failed
-			}
+			modelIdToSet = result.modelId;
+			providerHint = result.provider;
 		} else if (selectedValue === '__CUSTOM_BEDROCK__') {
-			isCustomSelection = true;
-			const { customId } = await inquirer.prompt([
-				{
-					type: 'input',
-					name: 'customId',
-					message: `Enter the custom Bedrock Model ID for the ${role} role (e.g., anthropic.claude-3-sonnet-20240229-v1:0):`
-				}
-			]);
-			if (!customId) {
-				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
-				return true; // Continue setup, but don't set this role
-			}
-			modelIdToSet = customId;
-			providerHint = CUSTOM_PROVIDERS.BEDROCK;
-
-			// Check if AWS environment variables exist
-			if (
-				!process.env.AWS_ACCESS_KEY_ID ||
-				!process.env.AWS_SECRET_ACCESS_KEY
-			) {
-				console.error(
-					chalk.red(
-						'Error: AWS_ACCESS_KEY_ID and/or AWS_SECRET_ACCESS_KEY environment variables are missing. Please set them before using custom Bedrock models.'
-					)
-				);
+			const result = await handleCustomProviderSelection('bedrock', role);
+			if (!result.success) {
 				setupSuccess = false;
-				return true; // Continue setup, but mark as failed
+				return true;
 			}
-
-			console.log(
-				chalk.blue(
-					`Custom Bedrock model "${modelIdToSet}" will be used. No validation performed.`
-				)
-			);
+			isCustomSelection = true;
+			modelIdToSet = result.modelId;
+			providerHint = result.provider;
 		} else if (selectedValue === '__CUSTOM_AZURE__') {
-			isCustomSelection = true;
-			const { customId } = await inquirer.prompt([
-				{
-					type: 'input',
-					name: 'customId',
-					message: `Enter the custom Azure OpenAI Model ID for the ${role} role (e.g., gpt-4o):`
-				}
-			]);
-			if (!customId) {
-				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
-				return true; // Continue setup, but don't set this role
-			}
-			modelIdToSet = customId;
-			providerHint = CUSTOM_PROVIDERS.AZURE;
-
-			// Check if Azure environment variables exist
-			if (
-				!process.env.AZURE_OPENAI_API_KEY ||
-				!process.env.AZURE_OPENAI_ENDPOINT
-			) {
-				console.error(
-					chalk.red(
-						'Error: AZURE_OPENAI_API_KEY and/or AZURE_OPENAI_ENDPOINT environment variables are missing. Please set them before using custom Azure models.'
-					)
-				);
+			const result = await handleCustomProviderSelection('azure', role);
+			if (!result.success) {
 				setupSuccess = false;
-				return true; // Continue setup, but mark as failed
+				return true;
 			}
-
-			console.log(
-				chalk.blue(
-					`Custom Azure OpenAI model "${modelIdToSet}" will be used. No validation performed.`
-				)
-			);
+			isCustomSelection = true;
+			modelIdToSet = result.modelId;
+			providerHint = result.provider;
 		} else if (selectedValue === '__CUSTOM_VERTEX__') {
-			isCustomSelection = true;
-			const { customId } = await inquirer.prompt([
-				{
-					type: 'input',
-					name: 'customId',
-					message: `Enter the custom Vertex AI Model ID for the ${role} role (e.g., gemini-1.5-pro-002):`
-				}
-			]);
-			if (!customId) {
-				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
-				return true; // Continue setup, but don't set this role
-			}
-			modelIdToSet = customId;
-			providerHint = CUSTOM_PROVIDERS.VERTEX;
-
-			// Check if Google/Vertex environment variables exist
-			if (
-				!process.env.GOOGLE_API_KEY &&
-				!process.env.GOOGLE_APPLICATION_CREDENTIALS
-			) {
-				console.error(
-					chalk.red(
-						'Error: Either GOOGLE_API_KEY or GOOGLE_APPLICATION_CREDENTIALS environment variable is required. Please set one before using custom Vertex models.'
-					)
-				);
+			const result = await handleCustomProviderSelection('vertex', role);
+			if (!result.success) {
 				setupSuccess = false;
-				return true; // Continue setup, but mark as failed
+				return true;
 			}
-
-			console.log(
-				chalk.blue(
-					`Custom Vertex AI model "${modelIdToSet}" will be used. No validation performed.`
-				)
-			);
+			isCustomSelection = true;
+			modelIdToSet = result.modelId;
+			providerHint = result.provider;
 		} else if (
 			selectedValue &&
 			typeof selectedValue === 'object' &&
@@ -4,6 +4,7 @@ import chalk from 'chalk';
 import { fileURLToPath } from 'url';
 import { log, findProjectRoot, resolveEnvVariable } from './utils.js';
 import { LEGACY_CONFIG_FILE } from '../../src/constants/paths.js';
+import { HOSTED_AI_PROVIDERS } from '../../src/constants/ai-providers.js';
 import { findConfigPath } from '../../src/utils/path-utils.js';
 import {
 	VALIDATED_PROVIDERS,
@@ -525,7 +525,7 @@ async function setModel(role, modelId, options = {}) {
 			success: false,
 			error: {
 				code: 'MODEL_NOT_FOUND_NO_HINT',
-				message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter or --ollama.`
+				message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter, --ollama, --bedrock, --azure, or --vertex.`
 			}
 		};
 	}

src/constants/ai-providers.js (new file)
@@ -0,0 +1,10 @@
+/**
+ * AI Provider constants for Task Master application
+ */
+
+/**
+ * Hosted AI providers that support custom model configurations.
+ * These providers are always considered valid since users can deploy
+ * custom models on these platforms.
+ */
+export const HOSTED_AI_PROVIDERS = ['bedrock', 'azure', 'vertex'];
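
Note: HOSTED_AI_PROVIDERS is imported by the config module in the hunk above, but its usage site is outside this excerpt. A membership check is the presumable consumption pattern (assumption, shown standalone):

// Assumed usage pattern; the actual call site is not shown in this diff.
const HOSTED_AI_PROVIDERS = ['bedrock', 'azure', 'vertex'];
console.log(HOSTED_AI_PROVIDERS.includes('bedrock')); // true -> treat custom model as valid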