Compare commits

...

3 Commits

Author SHA1 Message Date
Ralph Khreish
c5e1054b3c chore: fix CI 2025-06-20 14:12:47 +03:00
Ralph Khreish
52e6ef8792 chore: improve changelog 2025-06-20 14:04:23 +03:00
Ralph Khreish
51ce1f59de fix: providers config for azure, bedrock, and vertex 2025-06-20 14:04:23 +03:00
10 changed files with 531 additions and 147 deletions

View File

@@ -0,0 +1,11 @@
---
"task-master-ai": patch
---
Improve provider validation system with clean constants structure
- **Fixed "Invalid provider hint" errors**: Resolved validation failures for Azure, Vertex, and Bedrock providers
- **Improved search UX**: Integrated search for better model discovery with real-time filtering
- **Better organization**: Moved custom provider options to bottom of model selection with clear section separators
This change ensures all custom providers (Azure, Vertex, Bedrock, OpenRouter, Ollama) work correctly in `task-master models --setup`

View File

@@ -1,14 +1,14 @@
{ {
"models": { "models": {
"main": { "main": {
"provider": "anthropic", "provider": "vertex",
"modelId": "claude-sonnet-4-20250514", "modelId": "gemini-1.5-pro-002",
"maxTokens": 50000, "maxTokens": 50000,
"temperature": 0.2 "temperature": 0.2
}, },
"research": { "research": {
"provider": "perplexity", "provider": "perplexity",
"modelId": "sonar-pro", "modelId": "sonar",
"maxTokens": 8700, "maxTokens": 8700,
"temperature": 0.1 "temperature": 0.1
}, },
@@ -20,7 +20,6 @@
} }
}, },
"global": { "global": {
"userId": "1234567890",
"logLevel": "info", "logLevel": "info",
"debug": false, "debug": false,
"defaultSubtasks": 5, "defaultSubtasks": 5,
@@ -28,6 +27,7 @@
"projectName": "Taskmaster", "projectName": "Taskmaster",
"ollamaBaseURL": "http://localhost:11434/api", "ollamaBaseURL": "http://localhost:11434/api",
"bedrockBaseURL": "https://bedrock.us-east-1.amazonaws.com", "bedrockBaseURL": "https://bedrock.us-east-1.amazonaws.com",
"userId": "1234567890",
"azureBaseURL": "https://your-endpoint.azure.com/", "azureBaseURL": "https://your-endpoint.azure.com/",
"defaultTag": "master" "defaultTag": "master"
} }

35
package-lock.json generated
View File

@@ -20,6 +20,7 @@
"@ai-sdk/xai": "^1.2.15", "@ai-sdk/xai": "^1.2.15",
"@anthropic-ai/sdk": "^0.39.0", "@anthropic-ai/sdk": "^0.39.0",
"@aws-sdk/credential-providers": "^3.817.0", "@aws-sdk/credential-providers": "^3.817.0",
"@inquirer/search": "^3.0.15",
"@openrouter/ai-sdk-provider": "^0.4.5", "@openrouter/ai-sdk-provider": "^0.4.5",
"ai": "^4.3.10", "ai": "^4.3.10",
"boxen": "^8.0.1", "boxen": "^8.0.1",
@@ -2696,13 +2697,13 @@
} }
}, },
"node_modules/@inquirer/core": { "node_modules/@inquirer/core": {
"version": "10.1.9", "version": "10.1.13",
"resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.1.9.tgz", "resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.1.13.tgz",
"integrity": "sha512-sXhVB8n20NYkUBfDYgizGHlpRVaCRjtuzNZA6xpALIUbkgfd2Hjz+DfEN6+h1BRnuxw0/P4jCIMjMsEOAMwAJw==", "integrity": "sha512-1viSxebkYN2nJULlzCxES6G9/stgHSepZ9LqqfdIGPHj5OHhiBUXVS0a6R0bEC2A+VL4D9w6QB66ebCr6HGllA==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@inquirer/figures": "^1.0.11", "@inquirer/figures": "^1.0.12",
"@inquirer/type": "^3.0.5", "@inquirer/type": "^3.0.7",
"ansi-escapes": "^4.3.2", "ansi-escapes": "^4.3.2",
"cli-width": "^4.1.0", "cli-width": "^4.1.0",
"mute-stream": "^2.0.0", "mute-stream": "^2.0.0",
@@ -2822,9 +2823,9 @@
} }
}, },
"node_modules/@inquirer/figures": { "node_modules/@inquirer/figures": {
"version": "1.0.11", "version": "1.0.12",
"resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.11.tgz", "resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.12.tgz",
"integrity": "sha512-eOg92lvrn/aRUqbxRyvpEWnrvRuTYRifixHkYVpJiygTgVSBIHDqLh0SrMQXkafvULg3ck11V7xvR+zcgvpHFw==", "integrity": "sha512-MJttijd8rMFcKJC8NYmprWr6hD3r9Gd9qUC0XwPNwoEPWSMVJwA2MlXxF+nhZZNMY+HXsWa+o7KY2emWYIn0jQ==",
"license": "MIT", "license": "MIT",
"engines": { "engines": {
"node": ">=18" "node": ">=18"
@@ -2946,14 +2947,14 @@
} }
}, },
"node_modules/@inquirer/search": { "node_modules/@inquirer/search": {
"version": "3.0.11", "version": "3.0.15",
"resolved": "https://registry.npmjs.org/@inquirer/search/-/search-3.0.11.tgz", "resolved": "https://registry.npmjs.org/@inquirer/search/-/search-3.0.15.tgz",
"integrity": "sha512-9CWQT0ikYcg6Ls3TOa7jljsD7PgjcsYEM0bYE+Gkz+uoW9u8eaJCRHJKkucpRE5+xKtaaDbrND+nPDoxzjYyew==", "integrity": "sha512-YBMwPxYBrADqyvP4nNItpwkBnGGglAvCLVW8u4pRmmvOsHUtCAUIMbUrLX5B3tFL1/WsLGdQ2HNzkqswMs5Uaw==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@inquirer/core": "^10.1.9", "@inquirer/core": "^10.1.13",
"@inquirer/figures": "^1.0.11", "@inquirer/figures": "^1.0.12",
"@inquirer/type": "^3.0.5", "@inquirer/type": "^3.0.7",
"yoctocolors-cjs": "^2.1.2" "yoctocolors-cjs": "^2.1.2"
}, },
"engines": { "engines": {
@@ -2993,9 +2994,9 @@
} }
}, },
"node_modules/@inquirer/type": { "node_modules/@inquirer/type": {
"version": "3.0.5", "version": "3.0.7",
"resolved": "https://registry.npmjs.org/@inquirer/type/-/type-3.0.5.tgz", "resolved": "https://registry.npmjs.org/@inquirer/type/-/type-3.0.7.tgz",
"integrity": "sha512-ZJpeIYYueOz/i/ONzrfof8g89kNdO2hjGuvULROo3O8rlB2CRtSseE5KeirnyE4t/thAn/EwvS/vuQeJCn+NZg==", "integrity": "sha512-PfunHQcjwnju84L+ycmcMKB/pTPIngjUJvfnRhKY6FKPuYXlM4aQCb/nIdTFR6BEhMjFvngzvng/vBAJMZpLSA==",
"license": "MIT", "license": "MIT",
"engines": { "engines": {
"node": ">=18" "node": ">=18"

View File

@@ -50,6 +50,7 @@
"@ai-sdk/xai": "^1.2.15", "@ai-sdk/xai": "^1.2.15",
"@anthropic-ai/sdk": "^0.39.0", "@anthropic-ai/sdk": "^0.39.0",
"@aws-sdk/credential-providers": "^3.817.0", "@aws-sdk/credential-providers": "^3.817.0",
"@inquirer/search": "^3.0.15",
"@openrouter/ai-sdk-provider": "^0.4.5", "@openrouter/ai-sdk-provider": "^0.4.5",
"ai": "^4.3.10", "ai": "^4.3.10",
"boxen": "^8.0.1", "boxen": "^8.0.1",

View File

@@ -11,6 +11,7 @@ import fs from 'fs';
import https from 'https'; import https from 'https';
import http from 'http'; import http from 'http';
import inquirer from 'inquirer'; import inquirer from 'inquirer';
import search from '@inquirer/search';
import ora from 'ora'; // Import ora import ora from 'ora'; // Import ora
import { import {
@@ -71,6 +72,8 @@ import {
getBaseUrlForRole getBaseUrlForRole
} from './config-manager.js'; } from './config-manager.js';
import { CUSTOM_PROVIDERS } from '../../src/constants/providers.js';
import { import {
COMPLEXITY_REPORT_FILE, COMPLEXITY_REPORT_FILE,
PRD_FILE, PRD_FILE,
@@ -306,6 +309,16 @@ async function runInteractiveSetup(projectRoot) {
value: '__CUSTOM_BEDROCK__' value: '__CUSTOM_BEDROCK__'
}; };
const customAzureOption = {
name: '* Custom Azure OpenAI model', // Add Azure custom option
value: '__CUSTOM_AZURE__'
};
const customVertexOption = {
name: '* Custom Vertex AI model', // Add Vertex custom option
value: '__CUSTOM_VERTEX__'
};
let choices = []; let choices = [];
let defaultIndex = 0; // Default to 'Cancel' let defaultIndex = 0; // Default to 'Cancel'
@@ -344,43 +357,50 @@ async function runInteractiveSetup(projectRoot) {
); );
} }
// Construct final choices list based on whether 'None' is allowed // Construct final choices list with custom options moved to bottom
const commonPrefix = []; const systemOptions = [];
if (noChangeOption) { if (noChangeOption) {
commonPrefix.push(noChangeOption); systemOptions.push(noChangeOption);
} }
commonPrefix.push(cancelOption); systemOptions.push(cancelOption);
commonPrefix.push(customOpenRouterOption);
commonPrefix.push(customOllamaOption);
commonPrefix.push(customBedrockOption);
const prefixLength = commonPrefix.length; // Initial prefix length const customOptions = [
customOpenRouterOption,
customOllamaOption,
customBedrockOption,
customAzureOption,
customVertexOption
];
const systemLength = systemOptions.length;
if (allowNone) { if (allowNone) {
choices = [ choices = [
...commonPrefix, ...systemOptions,
new inquirer.Separator(), new inquirer.Separator('── Standard Models ──'),
{ name: '⚪ None (disable)', value: null }, // Symbol updated { name: '⚪ None (disable)', value: null },
new inquirer.Separator(), ...roleChoices,
...roleChoices new inquirer.Separator('── Custom Providers ──'),
...customOptions
]; ];
// Adjust default index: Prefix + Sep1 + None + Sep2 (+3) // Adjust default index: System + Sep1 + None (+2)
const noneOptionIndex = prefixLength + 1; const noneOptionIndex = systemLength + 1;
defaultIndex = defaultIndex =
currentChoiceIndex !== -1 currentChoiceIndex !== -1
? currentChoiceIndex + prefixLength + 3 // Offset by prefix and separators ? currentChoiceIndex + systemLength + 2 // Offset by system options and separators
: noneOptionIndex; // Default to 'None' if no current model matched : noneOptionIndex; // Default to 'None' if no current model matched
} else { } else {
choices = [ choices = [
...commonPrefix, ...systemOptions,
new inquirer.Separator(), new inquirer.Separator('── Standard Models ──'),
...roleChoices, ...roleChoices,
new inquirer.Separator() new inquirer.Separator('── Custom Providers ──'),
...customOptions
]; ];
// Adjust default index: Prefix + Sep (+1) // Adjust default index: System + Sep (+1)
defaultIndex = defaultIndex =
currentChoiceIndex !== -1 currentChoiceIndex !== -1
? currentChoiceIndex + prefixLength + 1 // Offset by prefix and separator ? currentChoiceIndex + systemLength + 1 // Offset by system options and separator
: noChangeOption : noChangeOption
? 1 ? 1
: 0; // Default to 'No Change' if present, else 'Cancel' : 0; // Default to 'No Change' if present, else 'Cancel'
@@ -403,32 +423,63 @@ async function runInteractiveSetup(projectRoot) {
const researchPromptData = getPromptData('research'); const researchPromptData = getPromptData('research');
const fallbackPromptData = getPromptData('fallback', true); // Allow 'None' for fallback const fallbackPromptData = getPromptData('fallback', true); // Allow 'None' for fallback
const answers = await inquirer.prompt([ // Display helpful intro message
{ console.log(chalk.cyan('\n🎯 Interactive Model Setup'));
type: 'list', console.log(chalk.gray('━'.repeat(50)));
name: 'mainModel', console.log(chalk.yellow('💡 Navigation tips:'));
message: 'Select the main model for generation/updates:', console.log(chalk.gray(' • Type to search and filter options'));
choices: mainPromptData.choices, console.log(chalk.gray(' • Use ↑↓ arrow keys to navigate results'));
default: mainPromptData.default console.log(
}, chalk.gray(
{ ' • Standard models are listed first, custom providers at bottom'
type: 'list', )
name: 'researchModel', );
console.log(chalk.gray(' • Press Enter to select\n'));
// Helper function to create search source for models
const createSearchSource = (choices, defaultValue) => {
return (searchTerm = '') => {
const filteredChoices = choices.filter((choice) => {
if (choice.type === 'separator') return true; // Always show separators
const searchText = choice.name || '';
return searchText.toLowerCase().includes(searchTerm.toLowerCase());
});
return Promise.resolve(filteredChoices);
};
};
const answers = {};
// Main model selection
answers.mainModel = await search({
message: 'Select the main model for generation/updates:',
source: createSearchSource(mainPromptData.choices, mainPromptData.default),
pageSize: 15
});
if (answers.mainModel !== '__CANCEL__') {
// Research model selection
answers.researchModel = await search({
message: 'Select the research model:', message: 'Select the research model:',
choices: researchPromptData.choices, source: createSearchSource(
default: researchPromptData.default, researchPromptData.choices,
when: (ans) => ans.mainModel !== '__CANCEL__' researchPromptData.default
}, ),
{ pageSize: 15
type: 'list', });
name: 'fallbackModel',
message: 'Select the fallback model (optional):', if (answers.researchModel !== '__CANCEL__') {
choices: fallbackPromptData.choices, // Fallback model selection
default: fallbackPromptData.default, answers.fallbackModel = await search({
when: (ans) => message: 'Select the fallback model (optional):',
ans.mainModel !== '__CANCEL__' && ans.researchModel !== '__CANCEL__' source: createSearchSource(
fallbackPromptData.choices,
fallbackPromptData.default
),
pageSize: 15
});
} }
]); }
let setupSuccess = true; let setupSuccess = true;
let setupConfigModified = false; let setupConfigModified = false;
@@ -468,7 +519,7 @@ async function runInteractiveSetup(projectRoot) {
return true; // Continue setup, but don't set this role return true; // Continue setup, but don't set this role
} }
modelIdToSet = customId; modelIdToSet = customId;
providerHint = 'openrouter'; providerHint = CUSTOM_PROVIDERS.OPENROUTER;
// Validate against live OpenRouter list // Validate against live OpenRouter list
const openRouterModels = await fetchOpenRouterModelsCLI(); const openRouterModels = await fetchOpenRouterModelsCLI();
if ( if (
@@ -497,7 +548,7 @@ async function runInteractiveSetup(projectRoot) {
return true; // Continue setup, but don't set this role return true; // Continue setup, but don't set this role
} }
modelIdToSet = customId; modelIdToSet = customId;
providerHint = 'ollama'; providerHint = CUSTOM_PROVIDERS.OLLAMA;
// Get the Ollama base URL from config for this role // Get the Ollama base URL from config for this role
const ollamaBaseURL = getBaseUrlForRole(role, projectRoot); const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
// Validate against live Ollama list // Validate against live Ollama list
@@ -538,7 +589,7 @@ async function runInteractiveSetup(projectRoot) {
return true; // Continue setup, but don't set this role return true; // Continue setup, but don't set this role
} }
modelIdToSet = customId; modelIdToSet = customId;
providerHint = 'bedrock'; providerHint = CUSTOM_PROVIDERS.BEDROCK;
// Check if AWS environment variables exist // Check if AWS environment variables exist
if ( if (
@@ -559,6 +610,76 @@ async function runInteractiveSetup(projectRoot) {
`Custom Bedrock model "${modelIdToSet}" will be used. No validation performed.` `Custom Bedrock model "${modelIdToSet}" will be used. No validation performed.`
) )
); );
} else if (selectedValue === '__CUSTOM_AZURE__') {
isCustomSelection = true;
const { customId } = await inquirer.prompt([
{
type: 'input',
name: 'customId',
message: `Enter the custom Azure OpenAI Model ID for the ${role} role (e.g., gpt-4o):`
}
]);
if (!customId) {
console.log(chalk.yellow('No custom ID entered. Skipping role.'));
return true; // Continue setup, but don't set this role
}
modelIdToSet = customId;
providerHint = CUSTOM_PROVIDERS.AZURE;
// Check if Azure environment variables exist
if (
!process.env.AZURE_OPENAI_API_KEY ||
!process.env.AZURE_OPENAI_ENDPOINT
) {
console.error(
chalk.red(
'Error: AZURE_OPENAI_API_KEY and/or AZURE_OPENAI_ENDPOINT environment variables are missing. Please set them before using custom Azure models.'
)
);
setupSuccess = false;
return true; // Continue setup, but mark as failed
}
console.log(
chalk.blue(
`Custom Azure OpenAI model "${modelIdToSet}" will be used. No validation performed.`
)
);
} else if (selectedValue === '__CUSTOM_VERTEX__') {
isCustomSelection = true;
const { customId } = await inquirer.prompt([
{
type: 'input',
name: 'customId',
message: `Enter the custom Vertex AI Model ID for the ${role} role (e.g., gemini-1.5-pro-002):`
}
]);
if (!customId) {
console.log(chalk.yellow('No custom ID entered. Skipping role.'));
return true; // Continue setup, but don't set this role
}
modelIdToSet = customId;
providerHint = CUSTOM_PROVIDERS.VERTEX;
// Check if Google/Vertex environment variables exist
if (
!process.env.GOOGLE_API_KEY &&
!process.env.GOOGLE_APPLICATION_CREDENTIALS
) {
console.error(
chalk.red(
'Error: Either GOOGLE_API_KEY or GOOGLE_APPLICATION_CREDENTIALS environment variable is required. Please set one before using custom Vertex models.'
)
);
setupSuccess = false;
return true; // Continue setup, but mark as failed
}
console.log(
chalk.blue(
`Custom Vertex AI model "${modelIdToSet}" will be used. No validation performed.`
)
);
} else if ( } else if (
selectedValue && selectedValue &&
typeof selectedValue === 'object' && typeof selectedValue === 'object' &&
@@ -3307,6 +3428,14 @@ ${result.result}
'--bedrock', '--bedrock',
'Allow setting a custom Bedrock model ID (use with --set-*) ' 'Allow setting a custom Bedrock model ID (use with --set-*) '
) )
.option(
'--azure',
'Allow setting a custom Azure OpenAI model ID (use with --set-*) '
)
.option(
'--vertex',
'Allow setting a custom Vertex AI model ID (use with --set-*) '
)
.addHelpText( .addHelpText(
'after', 'after',
` `
@@ -3318,6 +3447,8 @@ Examples:
$ task-master models --set-main my-custom-model --ollama # Set custom Ollama model for main role $ task-master models --set-main my-custom-model --ollama # Set custom Ollama model for main role
$ task-master models --set-main anthropic.claude-3-sonnet-20240229-v1:0 --bedrock # Set custom Bedrock model for main role $ task-master models --set-main anthropic.claude-3-sonnet-20240229-v1:0 --bedrock # Set custom Bedrock model for main role
$ task-master models --set-main some/other-model --openrouter # Set custom OpenRouter model for main role $ task-master models --set-main some/other-model --openrouter # Set custom OpenRouter model for main role
$ task-master models --set-main gpt-4o --azure # Set custom Azure OpenAI model for main role
$ task-master models --set-main claude-3-5-sonnet@20241022 --vertex # Set custom Vertex AI model for main role
$ task-master models --setup # Run interactive setup` $ task-master models --setup # Run interactive setup`
) )
.action(async (options) => { .action(async (options) => {

View File

@@ -5,6 +5,12 @@ import { fileURLToPath } from 'url';
import { log, findProjectRoot, resolveEnvVariable } from './utils.js'; import { log, findProjectRoot, resolveEnvVariable } from './utils.js';
import { LEGACY_CONFIG_FILE } from '../../src/constants/paths.js'; import { LEGACY_CONFIG_FILE } from '../../src/constants/paths.js';
import { findConfigPath } from '../../src/utils/path-utils.js'; import { findConfigPath } from '../../src/utils/path-utils.js';
import {
VALIDATED_PROVIDERS,
CUSTOM_PROVIDERS,
CUSTOM_PROVIDERS_ARRAY,
ALL_PROVIDERS
} from '../../src/constants/providers.js';
// Calculate __dirname in ESM // Calculate __dirname in ESM
const __filename = fileURLToPath(import.meta.url); const __filename = fileURLToPath(import.meta.url);
@@ -29,9 +35,6 @@ try {
process.exit(1); // Exit if models can't be loaded process.exit(1); // Exit if models can't be loaded
} }
// Define valid providers dynamically from the loaded MODEL_MAP
const VALID_PROVIDERS = Object.keys(MODEL_MAP || {});
// Default configuration values (used if config file is missing or incomplete) // Default configuration values (used if config file is missing or incomplete)
const DEFAULTS = { const DEFAULTS = {
models: { models: {
@@ -233,12 +236,25 @@ function getConfig(explicitRoot = null, forceReload = false) {
} }
/** /**
* Validates if a provider name is in the list of supported providers. * Validates if a provider name is supported.
* Custom providers (azure, vertex, bedrock, openrouter, ollama) are always allowed.
* Validated providers must exist in the MODEL_MAP from supported-models.json.
* @param {string} providerName The name of the provider. * @param {string} providerName The name of the provider.
* @returns {boolean} True if the provider is valid, false otherwise. * @returns {boolean} True if the provider is valid, false otherwise.
*/ */
function validateProvider(providerName) { function validateProvider(providerName) {
return VALID_PROVIDERS.includes(providerName); // Custom providers are always allowed
if (CUSTOM_PROVIDERS_ARRAY.includes(providerName)) {
return true;
}
// Validated providers must exist in MODEL_MAP
if (VALIDATED_PROVIDERS.includes(providerName)) {
return !!(MODEL_MAP && MODEL_MAP[providerName]);
}
// Unknown providers are not allowed
return false;
} }
/** /**
@@ -736,11 +752,11 @@ function getUserId(explicitRoot = null) {
} }
/** /**
* Gets a list of all provider names defined in the MODEL_MAP. * Gets a list of all known provider names (both validated and custom).
* @returns {string[]} An array of provider names. * @returns {string[]} An array of all provider names.
*/ */
function getAllProviders() { function getAllProviders() {
return Object.keys(MODEL_MAP || {}); return ALL_PROVIDERS;
} }
function getBaseUrlForRole(role, explicitRoot = null) { function getBaseUrlForRole(role, explicitRoot = null) {
@@ -759,7 +775,9 @@ export {
// Validation // Validation
validateProvider, validateProvider,
validateProviderModelCombination, validateProviderModelCombination,
VALID_PROVIDERS, VALIDATED_PROVIDERS,
CUSTOM_PROVIDERS,
ALL_PROVIDERS,
MODEL_MAP, MODEL_MAP,
getAvailableModels, getAvailableModels,
// Role-specific getters (No env var overrides) // Role-specific getters (No env var overrides)

View File

@@ -3,28 +3,40 @@
{ {
"id": "claude-sonnet-4-20250514", "id": "claude-sonnet-4-20250514",
"swe_score": 0.727, "swe_score": 0.727,
"cost_per_1m_tokens": { "input": 3.0, "output": 15.0 }, "cost_per_1m_tokens": {
"input": 3.0,
"output": 15.0
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 64000 "max_tokens": 64000
}, },
{ {
"id": "claude-opus-4-20250514", "id": "claude-opus-4-20250514",
"swe_score": 0.725, "swe_score": 0.725,
"cost_per_1m_tokens": { "input": 15.0, "output": 75.0 }, "cost_per_1m_tokens": {
"input": 15.0,
"output": 75.0
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 32000 "max_tokens": 32000
}, },
{ {
"id": "claude-3-7-sonnet-20250219", "id": "claude-3-7-sonnet-20250219",
"swe_score": 0.623, "swe_score": 0.623,
"cost_per_1m_tokens": { "input": 3.0, "output": 15.0 }, "cost_per_1m_tokens": {
"input": 3.0,
"output": 15.0
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 120000 "max_tokens": 120000
}, },
{ {
"id": "claude-3-5-sonnet-20241022", "id": "claude-3-5-sonnet-20241022",
"swe_score": 0.49, "swe_score": 0.49,
"cost_per_1m_tokens": { "input": 3.0, "output": 15.0 }, "cost_per_1m_tokens": {
"input": 3.0,
"output": 15.0
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 64000 "max_tokens": 64000
} }
@@ -33,81 +45,120 @@
{ {
"id": "gpt-4o", "id": "gpt-4o",
"swe_score": 0.332, "swe_score": 0.332,
"cost_per_1m_tokens": { "input": 2.5, "output": 10.0 }, "cost_per_1m_tokens": {
"input": 2.5,
"output": 10.0
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 16384 "max_tokens": 16384
}, },
{ {
"id": "o1", "id": "o1",
"swe_score": 0.489, "swe_score": 0.489,
"cost_per_1m_tokens": { "input": 15.0, "output": 60.0 }, "cost_per_1m_tokens": {
"input": 15.0,
"output": 60.0
},
"allowed_roles": ["main"] "allowed_roles": ["main"]
}, },
{ {
"id": "o3", "id": "o3",
"swe_score": 0.5, "swe_score": 0.5,
"cost_per_1m_tokens": { "input": 2.0, "output": 8.0 }, "cost_per_1m_tokens": {
"input": 2.0,
"output": 8.0
},
"allowed_roles": ["main", "fallback"] "allowed_roles": ["main", "fallback"]
}, },
{ {
"id": "o3-mini", "id": "o3-mini",
"swe_score": 0.493, "swe_score": 0.493,
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 }, "cost_per_1m_tokens": {
"input": 1.1,
"output": 4.4
},
"allowed_roles": ["main"], "allowed_roles": ["main"],
"max_tokens": 100000 "max_tokens": 100000
}, },
{ {
"id": "o4-mini", "id": "o4-mini",
"swe_score": 0.45, "swe_score": 0.45,
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 }, "cost_per_1m_tokens": {
"input": 1.1,
"output": 4.4
},
"allowed_roles": ["main", "fallback"] "allowed_roles": ["main", "fallback"]
}, },
{ {
"id": "o1-mini", "id": "o1-mini",
"swe_score": 0.4, "swe_score": 0.4,
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 }, "cost_per_1m_tokens": {
"input": 1.1,
"output": 4.4
},
"allowed_roles": ["main"] "allowed_roles": ["main"]
}, },
{ {
"id": "o1-pro", "id": "o1-pro",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 150.0, "output": 600.0 }, "cost_per_1m_tokens": {
"input": 150.0,
"output": 600.0
},
"allowed_roles": ["main"] "allowed_roles": ["main"]
}, },
{ {
"id": "gpt-4-5-preview", "id": "gpt-4-5-preview",
"swe_score": 0.38, "swe_score": 0.38,
"cost_per_1m_tokens": { "input": 75.0, "output": 150.0 }, "cost_per_1m_tokens": {
"input": 75.0,
"output": 150.0
},
"allowed_roles": ["main"] "allowed_roles": ["main"]
}, },
{ {
"id": "gpt-4-1-mini", "id": "gpt-4-1-mini",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.4, "output": 1.6 }, "cost_per_1m_tokens": {
"input": 0.4,
"output": 1.6
},
"allowed_roles": ["main"] "allowed_roles": ["main"]
}, },
{ {
"id": "gpt-4-1-nano", "id": "gpt-4-1-nano",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.1, "output": 0.4 }, "cost_per_1m_tokens": {
"input": 0.1,
"output": 0.4
},
"allowed_roles": ["main"] "allowed_roles": ["main"]
}, },
{ {
"id": "gpt-4o-mini", "id": "gpt-4o-mini",
"swe_score": 0.3, "swe_score": 0.3,
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 }, "cost_per_1m_tokens": {
"input": 0.15,
"output": 0.6
},
"allowed_roles": ["main"] "allowed_roles": ["main"]
}, },
{ {
"id": "gpt-4o-search-preview", "id": "gpt-4o-search-preview",
"swe_score": 0.33, "swe_score": 0.33,
"cost_per_1m_tokens": { "input": 2.5, "output": 10.0 }, "cost_per_1m_tokens": {
"input": 2.5,
"output": 10.0
},
"allowed_roles": ["research"] "allowed_roles": ["research"]
}, },
{ {
"id": "gpt-4o-mini-search-preview", "id": "gpt-4o-mini-search-preview",
"swe_score": 0.3, "swe_score": 0.3,
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 }, "cost_per_1m_tokens": {
"input": 0.15,
"output": 0.6
},
"allowed_roles": ["research"] "allowed_roles": ["research"]
} }
], ],
@@ -136,7 +187,10 @@
{ {
"id": "gemini-2.0-flash", "id": "gemini-2.0-flash",
"swe_score": 0.518, "swe_score": 0.518,
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 }, "cost_per_1m_tokens": {
"input": 0.15,
"output": 0.6
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 1048000 "max_tokens": 1048000
}, },
@@ -152,35 +206,50 @@
{ {
"id": "sonar-pro", "id": "sonar-pro",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 3, "output": 15 }, "cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "research"], "allowed_roles": ["main", "research"],
"max_tokens": 8700 "max_tokens": 8700
}, },
{ {
"id": "sonar", "id": "sonar",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 1, "output": 1 }, "cost_per_1m_tokens": {
"input": 1,
"output": 1
},
"allowed_roles": ["research"], "allowed_roles": ["research"],
"max_tokens": 8700 "max_tokens": 8700
}, },
{ {
"id": "deep-research", "id": "deep-research",
"swe_score": 0.211, "swe_score": 0.211,
"cost_per_1m_tokens": { "input": 2, "output": 8 }, "cost_per_1m_tokens": {
"input": 2,
"output": 8
},
"allowed_roles": ["research"], "allowed_roles": ["research"],
"max_tokens": 8700 "max_tokens": 8700
}, },
{ {
"id": "sonar-reasoning-pro", "id": "sonar-reasoning-pro",
"swe_score": 0.211, "swe_score": 0.211,
"cost_per_1m_tokens": { "input": 2, "output": 8 }, "cost_per_1m_tokens": {
"input": 2,
"output": 8
},
"allowed_roles": ["main", "research", "fallback"], "allowed_roles": ["main", "research", "fallback"],
"max_tokens": 8700 "max_tokens": 8700
}, },
{ {
"id": "sonar-reasoning", "id": "sonar-reasoning",
"swe_score": 0.211, "swe_score": 0.211,
"cost_per_1m_tokens": { "input": 1, "output": 5 }, "cost_per_1m_tokens": {
"input": 1,
"output": 5
},
"allowed_roles": ["main", "research", "fallback"], "allowed_roles": ["main", "research", "fallback"],
"max_tokens": 8700 "max_tokens": 8700
} }
@@ -190,7 +259,10 @@
"id": "grok-3", "id": "grok-3",
"name": "Grok 3", "name": "Grok 3",
"swe_score": null, "swe_score": null,
"cost_per_1m_tokens": { "input": 3, "output": 15 }, "cost_per_1m_tokens": {
"input": 3,
"output": 15
},
"allowed_roles": ["main", "fallback", "research"], "allowed_roles": ["main", "fallback", "research"],
"max_tokens": 131072 "max_tokens": 131072
}, },
@@ -198,7 +270,10 @@
"id": "grok-3-fast", "id": "grok-3-fast",
"name": "Grok 3 Fast", "name": "Grok 3 Fast",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 5, "output": 25 }, "cost_per_1m_tokens": {
"input": 5,
"output": 25
},
"allowed_roles": ["main", "fallback", "research"], "allowed_roles": ["main", "fallback", "research"],
"max_tokens": 131072 "max_tokens": 131072
} }
@@ -207,43 +282,64 @@
{ {
"id": "devstral:latest", "id": "devstral:latest",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 }, "cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"] "allowed_roles": ["main", "fallback"]
}, },
{ {
"id": "qwen3:latest", "id": "qwen3:latest",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 }, "cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"] "allowed_roles": ["main", "fallback"]
}, },
{ {
"id": "qwen3:14b", "id": "qwen3:14b",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 }, "cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"] "allowed_roles": ["main", "fallback"]
}, },
{ {
"id": "qwen3:32b", "id": "qwen3:32b",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 }, "cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"] "allowed_roles": ["main", "fallback"]
}, },
{ {
"id": "mistral-small3.1:latest", "id": "mistral-small3.1:latest",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 }, "cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"] "allowed_roles": ["main", "fallback"]
}, },
{ {
"id": "llama3.3:latest", "id": "llama3.3:latest",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 }, "cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"] "allowed_roles": ["main", "fallback"]
}, },
{ {
"id": "phi4:latest", "id": "phi4:latest",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 }, "cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"] "allowed_roles": ["main", "fallback"]
} }
], ],
@@ -251,175 +347,250 @@
{ {
"id": "google/gemini-2.5-flash-preview-05-20", "id": "google/gemini-2.5-flash-preview-05-20",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 }, "cost_per_1m_tokens": {
"input": 0.15,
"output": 0.6
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 1048576 "max_tokens": 1048576
}, },
{ {
"id": "google/gemini-2.5-flash-preview-05-20:thinking", "id": "google/gemini-2.5-flash-preview-05-20:thinking",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.15, "output": 3.5 }, "cost_per_1m_tokens": {
"input": 0.15,
"output": 3.5
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 1048576 "max_tokens": 1048576
}, },
{ {
"id": "google/gemini-2.5-pro-exp-03-25", "id": "google/gemini-2.5-pro-exp-03-25",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 }, "cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 1000000 "max_tokens": 1000000
}, },
{ {
"id": "deepseek/deepseek-chat-v3-0324:free", "id": "deepseek/deepseek-chat-v3-0324:free",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 }, "cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 163840 "max_tokens": 163840
}, },
{ {
"id": "deepseek/deepseek-chat-v3-0324", "id": "deepseek/deepseek-chat-v3-0324",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.27, "output": 1.1 }, "cost_per_1m_tokens": {
"input": 0.27,
"output": 1.1
},
"allowed_roles": ["main"], "allowed_roles": ["main"],
"max_tokens": 64000 "max_tokens": 64000
}, },
{ {
"id": "openai/gpt-4.1", "id": "openai/gpt-4.1",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 2, "output": 8 }, "cost_per_1m_tokens": {
"input": 2,
"output": 8
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 1000000 "max_tokens": 1000000
}, },
{ {
"id": "openai/gpt-4.1-mini", "id": "openai/gpt-4.1-mini",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.4, "output": 1.6 }, "cost_per_1m_tokens": {
"input": 0.4,
"output": 1.6
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 1000000 "max_tokens": 1000000
}, },
{ {
"id": "openai/gpt-4.1-nano", "id": "openai/gpt-4.1-nano",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.1, "output": 0.4 }, "cost_per_1m_tokens": {
"input": 0.1,
"output": 0.4
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 1000000 "max_tokens": 1000000
}, },
{ {
"id": "openai/o3", "id": "openai/o3",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 10, "output": 40 }, "cost_per_1m_tokens": {
"input": 10,
"output": 40
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 200000 "max_tokens": 200000
}, },
{ {
"id": "openai/codex-mini", "id": "openai/codex-mini",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 1.5, "output": 6 }, "cost_per_1m_tokens": {
"input": 1.5,
"output": 6
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 100000 "max_tokens": 100000
}, },
{ {
"id": "openai/gpt-4o-mini", "id": "openai/gpt-4o-mini",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 }, "cost_per_1m_tokens": {
"input": 0.15,
"output": 0.6
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 100000 "max_tokens": 100000
}, },
{ {
"id": "openai/o4-mini", "id": "openai/o4-mini",
"swe_score": 0.45, "swe_score": 0.45,
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 }, "cost_per_1m_tokens": {
"input": 1.1,
"output": 4.4
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 100000 "max_tokens": 100000
}, },
{ {
"id": "openai/o4-mini-high", "id": "openai/o4-mini-high",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 }, "cost_per_1m_tokens": {
"input": 1.1,
"output": 4.4
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 100000 "max_tokens": 100000
}, },
{ {
"id": "openai/o1-pro", "id": "openai/o1-pro",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 150, "output": 600 }, "cost_per_1m_tokens": {
"input": 150,
"output": 600
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 100000 "max_tokens": 100000
}, },
{ {
"id": "meta-llama/llama-3.3-70b-instruct", "id": "meta-llama/llama-3.3-70b-instruct",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 120, "output": 600 }, "cost_per_1m_tokens": {
"input": 120,
"output": 600
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 1048576 "max_tokens": 1048576
}, },
{ {
"id": "meta-llama/llama-4-maverick", "id": "meta-llama/llama-4-maverick",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.18, "output": 0.6 }, "cost_per_1m_tokens": {
"input": 0.18,
"output": 0.6
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 1000000 "max_tokens": 1000000
}, },
{ {
"id": "meta-llama/llama-4-scout", "id": "meta-llama/llama-4-scout",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.08, "output": 0.3 }, "cost_per_1m_tokens": {
"input": 0.08,
"output": 0.3
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 1000000 "max_tokens": 1000000
}, },
{ {
"id": "qwen/qwen-max", "id": "qwen/qwen-max",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 1.6, "output": 6.4 }, "cost_per_1m_tokens": {
"input": 1.6,
"output": 6.4
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 32768 "max_tokens": 32768
}, },
{ {
"id": "qwen/qwen-turbo", "id": "qwen/qwen-turbo",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.05, "output": 0.2 }, "cost_per_1m_tokens": {
"input": 0.05,
"output": 0.2
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 1000000 "max_tokens": 1000000
}, },
{ {
"id": "qwen/qwen3-235b-a22b", "id": "qwen/qwen3-235b-a22b",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.14, "output": 2 }, "cost_per_1m_tokens": {
"input": 0.14,
"output": 2
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 24000 "max_tokens": 24000
}, },
{ {
"id": "mistralai/mistral-small-3.1-24b-instruct:free", "id": "mistralai/mistral-small-3.1-24b-instruct:free",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 }, "cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 96000 "max_tokens": 96000
}, },
{ {
"id": "mistralai/mistral-small-3.1-24b-instruct", "id": "mistralai/mistral-small-3.1-24b-instruct",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.1, "output": 0.3 }, "cost_per_1m_tokens": {
"input": 0.1,
"output": 0.3
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 128000 "max_tokens": 128000
}, },
{ {
"id": "mistralai/devstral-small", "id": "mistralai/devstral-small",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.1, "output": 0.3 }, "cost_per_1m_tokens": {
"input": 0.1,
"output": 0.3
},
"allowed_roles": ["main"], "allowed_roles": ["main"],
"max_tokens": 110000 "max_tokens": 110000
}, },
{ {
"id": "mistralai/mistral-nemo", "id": "mistralai/mistral-nemo",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0.03, "output": 0.07 }, "cost_per_1m_tokens": {
"input": 0.03,
"output": 0.07
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 100000 "max_tokens": 100000
}, },
{ {
"id": "thudm/glm-4-32b:free", "id": "thudm/glm-4-32b:free",
"swe_score": 0, "swe_score": 0,
"cost_per_1m_tokens": { "input": 0, "output": 0 }, "cost_per_1m_tokens": {
"input": 0,
"output": 0
},
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 32768 "max_tokens": 32768
} }

View File

@@ -23,6 +23,7 @@ import {
} from '../config-manager.js'; } from '../config-manager.js';
import { findConfigPath } from '../../../src/utils/path-utils.js'; import { findConfigPath } from '../../../src/utils/path-utils.js';
import { log } from '../utils.js'; import { log } from '../utils.js';
import { CUSTOM_PROVIDERS } from '../../../src/constants/providers.js';
/** /**
* Fetches the list of models from OpenRouter API. * Fetches the list of models from OpenRouter API.
@@ -440,7 +441,7 @@ async function setModel(role, modelId, options = {}) {
} else { } else {
// Either not found internally, OR found but under a DIFFERENT provider than hinted. // Either not found internally, OR found but under a DIFFERENT provider than hinted.
// Proceed with custom logic based ONLY on the hint. // Proceed with custom logic based ONLY on the hint.
if (providerHint === 'openrouter') { if (providerHint === CUSTOM_PROVIDERS.OPENROUTER) {
// Check OpenRouter ONLY because hint was openrouter // Check OpenRouter ONLY because hint was openrouter
report('info', `Checking OpenRouter for ${modelId} (as hinted)...`); report('info', `Checking OpenRouter for ${modelId} (as hinted)...`);
const openRouterModels = await fetchOpenRouterModels(); const openRouterModels = await fetchOpenRouterModels();
@@ -449,7 +450,7 @@ async function setModel(role, modelId, options = {}) {
openRouterModels && openRouterModels &&
openRouterModels.some((m) => m.id === modelId) openRouterModels.some((m) => m.id === modelId)
) { ) {
determinedProvider = 'openrouter'; determinedProvider = CUSTOM_PROVIDERS.OPENROUTER;
// Check if this is a free model (ends with :free) // Check if this is a free model (ends with :free)
if (modelId.endsWith(':free')) { if (modelId.endsWith(':free')) {
@@ -465,7 +466,7 @@ async function setModel(role, modelId, options = {}) {
`Model ID "${modelId}" not found in the live OpenRouter model list. Please verify the ID and ensure it's available on OpenRouter.` `Model ID "${modelId}" not found in the live OpenRouter model list. Please verify the ID and ensure it's available on OpenRouter.`
); );
} }
} else if (providerHint === 'ollama') { } else if (providerHint === CUSTOM_PROVIDERS.OLLAMA) {
// Check Ollama ONLY because hint was ollama // Check Ollama ONLY because hint was ollama
report('info', `Checking Ollama for ${modelId} (as hinted)...`); report('info', `Checking Ollama for ${modelId} (as hinted)...`);
@@ -479,7 +480,7 @@ async function setModel(role, modelId, options = {}) {
`Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.` `Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
); );
} else if (ollamaModels.some((m) => m.model === modelId)) { } else if (ollamaModels.some((m) => m.model === modelId)) {
determinedProvider = 'ollama'; determinedProvider = CUSTOM_PROVIDERS.OLLAMA;
warningMessage = `Warning: Custom Ollama model '${modelId}' set. Ensure your Ollama server is running and has pulled this model. Taskmaster cannot guarantee compatibility.`; warningMessage = `Warning: Custom Ollama model '${modelId}' set. Ensure your Ollama server is running and has pulled this model. Taskmaster cannot guarantee compatibility.`;
report('warn', warningMessage); report('warn', warningMessage);
} else { } else {
@@ -489,13 +490,23 @@ async function setModel(role, modelId, options = {}) {
`Model ID "${modelId}" not found in the Ollama instance. Please verify the model is pulled and available. You can check available models with: curl ${tagsUrl}` `Model ID "${modelId}" not found in the Ollama instance. Please verify the model is pulled and available. You can check available models with: curl ${tagsUrl}`
); );
} }
} else if (providerHint === 'bedrock') { } else if (providerHint === CUSTOM_PROVIDERS.BEDROCK) {
// Set provider without model validation since Bedrock models are managed by AWS // Set provider without model validation since Bedrock models are managed by AWS
determinedProvider = 'bedrock'; determinedProvider = CUSTOM_PROVIDERS.BEDROCK;
warningMessage = `Warning: Custom Bedrock model '${modelId}' set. Please ensure the model ID is valid and accessible in your AWS account.`; warningMessage = `Warning: Custom Bedrock model '${modelId}' set. Please ensure the model ID is valid and accessible in your AWS account.`;
report('warn', warningMessage); report('warn', warningMessage);
} else if (providerHint === CUSTOM_PROVIDERS.AZURE) {
// Set provider without model validation since Azure models are managed by Azure
determinedProvider = CUSTOM_PROVIDERS.AZURE;
warningMessage = `Warning: Custom Azure model '${modelId}' set. Please ensure the model deployment is valid and accessible in your Azure account.`;
report('warn', warningMessage);
} else if (providerHint === CUSTOM_PROVIDERS.VERTEX) {
// Set provider without model validation since Vertex models are managed by Google Cloud
determinedProvider = CUSTOM_PROVIDERS.VERTEX;
warningMessage = `Warning: Custom Vertex AI model '${modelId}' set. Please ensure the model is valid and accessible in your Google Cloud project.`;
report('warn', warningMessage);
} else { } else {
// Invalid provider hint - should not happen // Invalid provider hint - should not happen with our constants
throw new Error(`Invalid provider hint received: ${providerHint}`); throw new Error(`Invalid provider hint received: ${providerHint}`);
} }
} }

View File

@@ -0,0 +1,32 @@
/**
 * Provider validation constants
 * Defines which providers should be validated against the supported-models.json file.
 *
 * All exports are frozen: these are shared module-level constants consumed by
 * model-selection and config validation, so accidental mutation by a consumer
 * (e.g. `ALL_PROVIDERS.push(...)`) must not silently change validation behavior.
 */

// Providers that have predefined model lists and should be validated
// against supported-models.json before a model ID is accepted.
export const VALIDATED_PROVIDERS = Object.freeze([
	'anthropic',
	'openai',
	'google',
	'perplexity',
	'xai',
	'mistral'
]);

// Custom providers object for easy named access (e.g. CUSTOM_PROVIDERS.AZURE).
// These providers manage their own model catalogs (cloud-hosted or local),
// so model IDs are NOT validated against supported-models.json.
export const CUSTOM_PROVIDERS = Object.freeze({
	AZURE: 'azure',
	VERTEX: 'vertex',
	BEDROCK: 'bedrock',
	OPENROUTER: 'openrouter',
	OLLAMA: 'ollama'
});

// Custom providers array (for backward compatibility and iteration).
// Order follows the key-insertion order of CUSTOM_PROVIDERS above.
export const CUSTOM_PROVIDERS_ARRAY = Object.freeze(
	Object.values(CUSTOM_PROVIDERS)
);

// All known providers (for reference): validated providers first,
// then custom providers, preserving each source list's order.
export const ALL_PROVIDERS = Object.freeze([
	...VALIDATED_PROVIDERS,
	...CUSTOM_PROVIDERS_ARRAY
]);

View File

@@ -713,17 +713,25 @@ describe('isConfigFilePresent', () => {
// --- getAllProviders Tests --- // --- getAllProviders Tests ---
describe('getAllProviders', () => { describe('getAllProviders', () => {
test('should return list of providers from supported-models.json', () => { test('should return all providers from ALL_PROVIDERS constant', () => {
// Arrange: Ensure config is loaded with real data // Arrange: Ensure config is loaded with real data
configManager.getConfig(null, true); // Force load using the mock that returns real data configManager.getConfig(null, true); // Force load using the mock that returns real data
// Act // Act
const providers = configManager.getAllProviders(); const providers = configManager.getAllProviders();
// Assert // Assert
// Assert against the actual keys in the REAL loaded data // getAllProviders() should return the same as the ALL_PROVIDERS constant
const expectedProviders = Object.keys(REAL_SUPPORTED_MODELS_DATA); expect(providers).toEqual(configManager.ALL_PROVIDERS);
expect(providers).toEqual(expect.arrayContaining(expectedProviders)); expect(providers.length).toBe(configManager.ALL_PROVIDERS.length);
expect(providers.length).toBe(expectedProviders.length);
// Verify it includes both validated and custom providers
expect(providers).toEqual(
expect.arrayContaining(configManager.VALIDATED_PROVIDERS)
);
expect(providers).toEqual(
expect.arrayContaining(Object.values(configManager.CUSTOM_PROVIDERS))
);
}); });
}); });