feat(models): implement custom model support for ollama/openrouter
Adds the ability for users to specify custom model IDs for Ollama and OpenRouter providers, bypassing the internal supported model list.
- Introduces --ollama and --openrouter flags for the 'task-master models --set-<role>' command.
- Updates the interactive 'task-master models --setup' to include options for entering custom Ollama/OpenRouter IDs.
- Implements live validation against the OpenRouter API when a custom OpenRouter ID is provided.
- Refines the model setting logic to prioritize explicit provider flags/choices.
- Adds warnings when custom models are set.
- Updates the changeset file.
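Example invocations (for illustration; both model IDs below are hypothetical placeholders):

task-master models --set-main my-local-model:latest --ollama
task-master models --set-research some-org/some-model --openrouter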
.changeset/ninety-ghosts-relax.md (new file, 11 lines)
@@ -0,0 +1,11 @@
+---
+'task-master-ai': patch
+---
+
+- feat: Add custom model ID support for Ollama and OpenRouter providers.
+- Adds the `--ollama` and `--openrouter` flags to the `task-master models --set-<role>` command to set models for those providers outside of the supported models list.
+- Updated `task-master models --setup` interactive mode with options to explicitly enter custom Ollama or OpenRouter model IDs.
+- Implemented live validation against the OpenRouter API (`/api/v1/models`) when setting a custom OpenRouter model ID (via flag or setup).
+- Refined logic to prioritize explicit provider flags/choices over internal model list lookups in case of ID conflicts.
+- Added warnings when setting custom/unvalidated models.
+- We obviously don't recommend going with a custom, unproven model. If you do and find performance is good, please let us know so we can add it to the list of supported models.
@@ -1,14 +1,14 @@
 {
 	"models": {
 		"main": {
-			"provider": "xai",
-			"modelId": "grok-3",
+			"provider": "openrouter",
+			"modelId": "meta-llama/llama-4-maverick:free",
 			"maxTokens": 100000,
 			"temperature": 0.2
 		},
 		"research": {
-			"provider": "xai",
-			"modelId": "grok-3",
+			"provider": "perplexity",
+			"modelId": "sonar-pro",
 			"maxTokens": 8700,
 			"temperature": 0.1
 		},
context/open-router-docs.txt (new file, 9434 lines)
File diff suppressed because it is too large
@@ -37,6 +37,20 @@ export async function modelsDirect(args, log, context = {}) {
 	log.info(`Executing models_direct with args: ${JSON.stringify(args)}`);
 	log.info(`Using project root: ${projectRoot}`);
 
+	// Validate flags: cannot use both openrouter and ollama simultaneously
+	if (args.openrouter && args.ollama) {
+		log.error(
+			'Error: Cannot use both openrouter and ollama flags simultaneously.'
+		);
+		return {
+			success: false,
+			error: {
+				code: 'INVALID_ARGS',
+				message: 'Cannot use both openrouter and ollama flags simultaneously.'
+			}
+		};
+	}
+
 	try {
 		enableSilentMode();
 
@@ -55,7 +69,12 @@ export async function modelsDirect(args, log, context = {}) {
 			return await setModel('main', args.setMain, {
 				session,
 				mcpLog: logWrapper,
-				projectRoot // Pass projectRoot to function
+				projectRoot, // Pass projectRoot to function
+				providerHint: args.openrouter
+					? 'openrouter'
+					: args.ollama
+						? 'ollama'
+						: undefined // Pass hint
 			});
 		}
@@ -63,7 +82,12 @@ export async function modelsDirect(args, log, context = {}) {
 			return await setModel('research', args.setResearch, {
 				session,
 				mcpLog: logWrapper,
-				projectRoot // Pass projectRoot to function
+				projectRoot, // Pass projectRoot to function
+				providerHint: args.openrouter
+					? 'openrouter'
+					: args.ollama
+						? 'ollama'
+						: undefined // Pass hint
 			});
 		}
@@ -71,7 +95,12 @@ export async function modelsDirect(args, log, context = {}) {
 			return await setModel('fallback', args.setFallback, {
 				session,
 				mcpLog: logWrapper,
-				projectRoot // Pass projectRoot to function
+				projectRoot, // Pass projectRoot to function
+				providerHint: args.openrouter
+					? 'openrouter'
+					: args.ollama
+						? 'ollama'
+						: undefined // Pass hint
 			});
 		}
@@ -46,7 +46,15 @@ export function registerModelsTool(server) {
 			projectRoot: z
 				.string()
 				.optional()
-				.describe('The directory of the project. Must be an absolute path.')
+				.describe('The directory of the project. Must be an absolute path.'),
+			openrouter: z
+				.boolean()
+				.optional()
+				.describe('Indicates the set model ID is a custom OpenRouter model.'),
+			ollama: z
+				.boolean()
+				.optional()
+				.describe('Indicates the set model ID is a custom Ollama model.')
 		}),
 		execute: async (args, { log, session }) => {
 			try {
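For reference, an MCP-side call that sets a custom OpenRouter model for the main role would pass arguments shaped roughly like this (a sketch; the path and model ID are hypothetical):

const args = {
	projectRoot: '/absolute/path/to/project', // hypothetical absolute path
	setMain: 'some-org/some-model', // hypothetical custom OpenRouter ID
	openrouter: true // marks the ID as a custom OpenRouter model
};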
@@ -87,6 +87,50 @@ async function runInteractiveSetup(projectRoot) {
 		);
 		process.exit(1);
 	}
 
+	// Helper function to fetch OpenRouter models (duplicated for CLI context)
+	function fetchOpenRouterModelsCLI() {
+		return new Promise((resolve) => {
+			const options = {
+				hostname: 'openrouter.ai',
+				path: '/api/v1/models',
+				method: 'GET',
+				headers: {
+					Accept: 'application/json'
+				}
+			};
+
+			const req = https.request(options, (res) => {
+				let data = '';
+				res.on('data', (chunk) => {
+					data += chunk;
+				});
+				res.on('end', () => {
+					if (res.statusCode === 200) {
+						try {
+							const parsedData = JSON.parse(data);
+							resolve(parsedData.data || []); // Return the array of models
+						} catch (e) {
+							console.error('Error parsing OpenRouter response:', e);
+							resolve(null); // Indicate failure
+						}
+					} else {
+						console.error(
+							`OpenRouter API request failed with status code: ${res.statusCode}`
+						);
+						resolve(null); // Indicate failure
+					}
+				});
+			});
+
+			req.on('error', (e) => {
+				console.error('Error fetching OpenRouter models:', e);
+				resolve(null); // Indicate failure
+			});
+			req.end();
+		});
+	}
+
 	// Get available models - pass projectRoot
 	const availableModelsResult = await getAvailableModelsList({ projectRoot });
 	if (!availableModelsResult.success) {
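Note that the helper resolves rather than rejects on failure, so callers branch on null instead of wrapping the call in try/catch. A minimal usage sketch inside the async setup flow (customId stands in for whatever the user typed):

const openRouterModels = await fetchOpenRouterModelsCLI();
if (openRouterModels === null) {
	// Network or parse failure: the live check is inconclusive
} else if (!openRouterModels.some((m) => m.id === customId)) {
	// The ID does not exist on OpenRouter
}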
@@ -119,64 +163,71 @@
 
 	console.log(chalk.cyan.bold('\nInteractive Model Setup:'));
 
-	// Find all available models for setup options
-	const allModelsForSetup = availableModelsForSetup
-		.filter((model) => !model.modelId.startsWith('[')) // Filter out placeholders like [ollama-any]
-		.map((model) => ({
+	// Helper to get choices and default index for a role
+	const getPromptData = (role, allowNone = false) => {
+		// Filter models FIRST based on allowed roles
+		const filteredModels = availableModelsForSetup
+			.filter((model) => !model.modelId.startsWith('[')) // Filter out placeholders
+			.filter((model) => model.allowedRoles?.includes(role)); // Filter by allowed role
+
+		// THEN map the filtered models to the choice format
+		const roleChoices = filteredModels.map((model) => ({
 			name: `${model.provider} / ${model.modelId}`,
 			value: { provider: model.provider, id: model.modelId }
 		}));
 
-	if (allModelsForSetup.length === 0) {
-		console.error(
-			chalk.red('Error: No selectable models found in configuration.')
-		);
-		process.exit(1);
-	}
-
-	// Helper to get choices and default index for a role
-	const getPromptData = (role, allowNone = false) => {
-		const roleChoices = allModelsForSetup.filter((modelChoice) =>
-			availableModelsForSetup
-				.find((m) => m.modelId === modelChoice.value.id)
-				?.allowedRoles?.includes(role)
-		);
-
-		let choices = [...roleChoices];
+		let choices = []; // Initialize choices array
 		let defaultIndex = -1;
 		const currentModelId = currentModels[role]?.modelId;
 
+		// --- Add Custom/Cancel Options --- //
+		const customOpenRouterOption = {
+			name: 'OpenRouter (Enter Custom ID)',
+			value: '__CUSTOM_OPENROUTER__'
+		};
+		const customOllamaOption = {
+			name: 'Ollama (Enter Custom ID)',
+			value: '__CUSTOM_OLLAMA__'
+		};
+		const cancelOption = { name: 'Cancel setup', value: '__CANCEL__' };
+
+		// Find the index of the current model within the role-specific choices *before* adding custom options
+		const currentChoiceIndex = roleChoices.findIndex(
+			(c) => c.value.id === currentModelId
+		);
+
 		if (allowNone) {
 			choices = [
+				cancelOption,
+				customOpenRouterOption,
+				customOllamaOption,
+				new inquirer.Separator(),
 				{ name: 'None (disable)', value: null },
 				new inquirer.Separator(),
 				...roleChoices
 			];
-			if (currentModelId) {
-				const foundIndex = roleChoices.findIndex(
-					(m) => m.value.id === currentModelId
-				);
-				defaultIndex = foundIndex !== -1 ? foundIndex + 2 : 0; // +2 for None and Separator
-			} else {
-				defaultIndex = 0; // Default to 'None'
-			}
+			// Adjust default index for extra options (Cancel, CustomOR, CustomOllama, Sep1, None, Sep2)
+			defaultIndex = currentChoiceIndex !== -1 ? currentChoiceIndex + 6 : 4; // Default to 'None' if no current model matched
 		} else {
-			if (currentModelId) {
-				defaultIndex = roleChoices.findIndex(
-					(m) => m.value.id === currentModelId
-				);
-			}
-			// Ensure defaultIndex is valid, otherwise default to 0
-			if (defaultIndex < 0 || defaultIndex >= roleChoices.length) {
-				defaultIndex = 0;
-			}
+			choices = [
+				cancelOption,
+				customOpenRouterOption,
+				customOllamaOption,
+				new inquirer.Separator(),
+				...roleChoices
+			];
+			// Adjust default index for extra options (Cancel, CustomOR, CustomOllama, Sep)
+			defaultIndex = currentChoiceIndex !== -1 ? currentChoiceIndex + 4 : 0; // Default to 'Cancel' if no current model matched
 		}
 
-		// Add Cancel option
-		const cancelOption = { name: 'Cancel setup', value: '__CANCEL__' };
-		choices = [cancelOption, new inquirer.Separator(), ...choices];
-		// Adjust default index accounting for Cancel and Separator
-		defaultIndex = defaultIndex !== -1 ? defaultIndex + 2 : 0;
+		// Ensure defaultIndex is valid within the final choices array length
+		if (defaultIndex < 0 || defaultIndex >= choices.length) {
+			// If default calculation failed or pointed outside bounds, reset intelligently
+			defaultIndex = 0; // Default to 'Cancel'
+			console.warn(
+				`Warning: Could not determine default model for role '${role}'. Defaulting to 'Cancel'.`
+			); // Add warning
+		}
 
 		return { choices, default: defaultIndex };
 	};
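To make the index arithmetic concrete: with allowNone the final array is laid out as [Cancel, Custom OpenRouter, Custom Ollama, Separator, None, Separator, ...roleChoices], so a matched current model lands at currentChoiceIndex + 6 and the fallback default of 4 points at 'None'; without allowNone the array is [Cancel, Custom OpenRouter, Custom Ollama, Separator, ...roleChoices], giving the + 4 offset and a default of 0 ('Cancel').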
@@ -213,132 +264,169 @@
 		}
 	]);
 
 	// Check if user canceled at any point
 	if (
 		answers.mainModel === '__CANCEL__' ||
 		answers.researchModel === '__CANCEL__' ||
 		answers.fallbackModel === '__CANCEL__'
 	) {
 		console.log(chalk.yellow('\nSetup canceled. No changes made.'));
 		return; // Return instead of exit to allow display logic to run maybe? Or exit? Let's return for now.
 	}
 
 	// Apply changes using setModel
 	let setupSuccess = true;
 	let setupConfigModified = false;
 	const coreOptionsSetup = { projectRoot }; // Pass root for setup actions
 
-	// Set Main Model
-	if (
-		answers.mainModel?.id &&
-		answers.mainModel.id !== currentModels.main?.modelId
-	) {
-		const result = await setModel(
-			'main',
-			answers.mainModel.id,
-			coreOptionsSetup
-		);
-		if (result.success) {
-			console.log(
-				chalk.blue(
-					`Selected main model: ${result.data.provider} / ${result.data.modelId}`
-				)
-			);
-			setupConfigModified = true;
-		} else {
-			console.error(
-				chalk.red(
-					`Error setting main model: ${result.error?.message || 'Unknown'}`
-				)
-			);
-			setupSuccess = false;
-		}
-	}
+	// Helper to handle setting a model (including custom)
+	async function handleSetModel(role, selectedValue, currentModelId) {
+		if (selectedValue === '__CANCEL__') {
+			console.log(
+				chalk.yellow(`\nSetup canceled during ${role} model selection.`)
+			);
+			return false; // Indicate cancellation
+		}
 
-	// Set Research Model
-	if (
-		answers.researchModel?.id &&
-		answers.researchModel.id !== currentModels.research?.modelId
-	) {
-		const result = await setModel(
-			'research',
-			answers.researchModel.id,
-			coreOptionsSetup
-		);
-		if (result.success) {
-			console.log(
-				chalk.blue(
-					`Selected research model: ${result.data.provider} / ${result.data.modelId}`
-				)
-			);
-			setupConfigModified = true;
-		} else {
-			console.error(
-				chalk.red(
-					`Error setting research model: ${result.error?.message || 'Unknown'}`
-				)
-			);
-			setupSuccess = false;
-		}
-	}
+		let modelIdToSet = null;
+		let providerHint = null;
+		let isCustomSelection = false;
 
-	// Set Fallback Model - Handle 'None' selection
-	const currentFallbackId = currentModels.fallback?.modelId;
-	const selectedFallbackValue = answers.fallbackModel; // Could be null or model object
-	const selectedFallbackId = selectedFallbackValue?.id; // Undefined if null
-
-	if (selectedFallbackId !== currentFallbackId) {
-		// Compare IDs
-		if (selectedFallbackId) {
-			// User selected a specific fallback model
-			const result = await setModel(
-				'fallback',
-				selectedFallbackId,
-				coreOptionsSetup
-			);
-			if (result.success) {
-				console.log(
-					chalk.blue(
-						`Selected fallback model: ${result.data.provider} / ${result.data.modelId}`
-					)
-				);
-				setupConfigModified = true;
-			} else {
-				console.error(
-					chalk.red(
-						`Error setting fallback model: ${result.error?.message || 'Unknown'}`
-					)
-				);
-				setupSuccess = false;
-			}
-		} else if (currentFallbackId) {
-			// User selected 'None' but a fallback was previously set
-			// Need to explicitly clear it in the config file
-			const currentCfg = getConfig(projectRoot); // Pass root
-			if (currentCfg?.models?.fallback) {
-				// Check if fallback exists before clearing
-				currentCfg.models.fallback = {
-					...currentCfg.models.fallback, // Keep params like tokens/temp
-					provider: undefined,
-					modelId: undefined
-				};
-				if (writeConfig(currentCfg, projectRoot)) {
-					// Pass root
-					console.log(chalk.blue('Fallback model disabled.'));
-					setupConfigModified = true;
-				} else {
-					console.error(
-						chalk.red('Failed to disable fallback model in config file.')
-					);
-					setupSuccess = false;
-				}
-			} else {
-				console.log(chalk.blue('Fallback model was already disabled.'));
-			}
-		}
-	}
+		if (selectedValue === '__CUSTOM_OPENROUTER__') {
+			isCustomSelection = true;
+			const { customId } = await inquirer.prompt([
+				{
+					type: 'input',
+					name: 'customId',
+					message: `Enter the custom OpenRouter Model ID for the ${role} role:`
+				}
+			]);
+			if (!customId) {
+				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
+				return true; // Continue setup, but don't set this role
+			}
+			modelIdToSet = customId;
+			providerHint = 'openrouter';
+			// Validate against live OpenRouter list
+			const openRouterModels = await fetchOpenRouterModelsCLI();
+			if (
+				!openRouterModels ||
+				!openRouterModels.some((m) => m.id === modelIdToSet)
+			) {
+				console.error(
+					chalk.red(
+						`Error: Model ID "${modelIdToSet}" not found in the live OpenRouter model list. Please check the ID.`
+					)
+				);
+				setupSuccess = false;
+				return true; // Continue setup, but mark as failed
+			}
+		} else if (selectedValue === '__CUSTOM_OLLAMA__') {
+			isCustomSelection = true;
+			const { customId } = await inquirer.prompt([
+				{
+					type: 'input',
+					name: 'customId',
+					message: `Enter the custom Ollama Model ID for the ${role} role:`
+				}
+			]);
+			if (!customId) {
+				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
+				return true;
+			}
+			modelIdToSet = customId;
+			providerHint = 'ollama';
+		} else if (
+			selectedValue &&
+			typeof selectedValue === 'object' &&
+			selectedValue.id
+		) {
+			// Standard model selected from list
+			modelIdToSet = selectedValue.id;
+			providerHint = selectedValue.provider; // Provider is known
+		} else if (selectedValue === null && role === 'fallback') {
+			// Handle disabling fallback
+			modelIdToSet = null;
+			providerHint = null;
+		} else if (selectedValue) {
+			console.error(
+				chalk.red(
+					`Internal Error: Unexpected selection value for ${role}: ${JSON.stringify(selectedValue)}`
+				)
+			);
+			setupSuccess = false;
+			return true;
+		}
+
+		// Only proceed if there's a change to be made
+		if (modelIdToSet !== currentModelId) {
+			if (modelIdToSet) {
+				// Set a specific model (standard or custom)
+				const result = await setModel(role, modelIdToSet, {
+					...coreOptionsSetup,
+					providerHint // Pass the hint
+				});
+				if (result.success) {
+					console.log(
+						chalk.blue(
+							`Set ${role} model: ${result.data.provider} / ${result.data.modelId}`
+						)
+					);
+					if (result.data.warning) {
+						// Display warning if returned by setModel
+						console.log(chalk.yellow(result.data.warning));
+					}
+					setupConfigModified = true;
+				} else {
+					console.error(
+						chalk.red(
+							`Error setting ${role} model: ${result.error?.message || 'Unknown'}`
+						)
+					);
+					setupSuccess = false;
+				}
+			} else if (role === 'fallback') {
+				// Disable fallback model
+				const currentCfg = getConfig(projectRoot);
+				if (currentCfg?.models?.fallback?.modelId) {
+					// Check if it was actually set before clearing
+					currentCfg.models.fallback = {
+						...currentCfg.models.fallback,
+						provider: undefined,
+						modelId: undefined
+					};
+					if (writeConfig(currentCfg, projectRoot)) {
+						console.log(chalk.blue('Fallback model disabled.'));
+						setupConfigModified = true;
+					} else {
+						console.error(
+							chalk.red('Failed to disable fallback model in config file.')
+						);
+						setupSuccess = false;
+					}
+				} else {
+					console.log(chalk.blue('Fallback model was already disabled.'));
+				}
+			}
+		}
+		// No action needed if fallback was already null/undefined and user selected None
+		return true; // Indicate setup should continue
+	}
+
+	// Process answers using the handler
+	if (
+		!(await handleSetModel(
+			'main',
+			answers.mainModel,
+			currentModels.main?.modelId
+		))
+	)
+		return;
+	if (
+		!(await handleSetModel(
+			'research',
+			answers.researchModel,
+			currentModels.research?.modelId
+		))
+	)
+		return;
+	if (
+		!(await handleSetModel(
+			'fallback',
+			answers.fallbackModel,
+			currentModels.fallback?.modelId
+		))
+	)
+		return;
 
 	if (setupSuccess && setupConfigModified) {
 		console.log(chalk.green.bold('\nModel setup complete!'));
 	} else if (setupSuccess && !setupConfigModified) {
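The boolean returned by handleSetModel is the loop-control contract for the three calls above: false only when the user chose Cancel (aborting the remaining roles via the early returns), true in every other case, including per-role failures, which are recorded in setupSuccess rather than aborting setup.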
@@ -1880,9 +1968,27 @@ function registerCommands(programInstance) {
 			'Set the model to use if the primary fails'
 		)
 		.option('--setup', 'Run interactive setup to configure models')
+		.option(
+			'--openrouter',
+			'Allow setting a custom OpenRouter model ID (use with --set-*)'
+		)
+		.option(
+			'--ollama',
+			'Allow setting a custom Ollama model ID (use with --set-*)'
+		)
 		.action(async (options) => {
 			const projectRoot = findProjectRoot(); // Find project root for context
 
+			// Validate flags: cannot use both --openrouter and --ollama simultaneously
+			if (options.openrouter && options.ollama) {
+				console.error(
+					chalk.red(
+						'Error: Cannot use both --openrouter and --ollama flags simultaneously.'
+					)
+				);
+				process.exit(1);
+			}
+
 			// --- Handle Interactive Setup ---
 			if (options.setup) {
 				// Assume runInteractiveSetup is defined elsewhere in this file
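With the options registered, custom model selection from the CLI looks like this (the Ollama ID is a hypothetical placeholder; the OpenRouter ID is one from the supported-models list in this commit):

task-master models --set-main my-local-model:latest --ollama
task-master models --set-main deepseek/deepseek-chat-v3-0324 --openrouter
task-master models --set-main foo --ollama --openrouter   # rejected by the flag check above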
@@ -1894,10 +2000,18 @@ function registerCommands(programInstance) {
 			let modelUpdated = false;
 			if (options.setMain) {
 				const result = await setModel('main', options.setMain, {
-					projectRoot
+					projectRoot,
+					providerHint: options.openrouter
+						? 'openrouter'
+						: options.ollama
+							? 'ollama'
+							: undefined
 				});
 				if (result.success) {
 					console.log(chalk.green(`✅ ${result.data.message}`));
+					if (result.data.warning) {
+						console.log(chalk.yellow(result.data.warning));
+					}
 					modelUpdated = true;
 				} else {
 					console.error(chalk.red(`❌ Error: ${result.error.message}`));
@@ -1906,10 +2020,18 @@ function registerCommands(programInstance) {
 			}
 			if (options.setResearch) {
 				const result = await setModel('research', options.setResearch, {
-					projectRoot
+					projectRoot,
+					providerHint: options.openrouter
+						? 'openrouter'
+						: options.ollama
+							? 'ollama'
+							: undefined
 				});
 				if (result.success) {
 					console.log(chalk.green(`✅ ${result.data.message}`));
+					if (result.data.warning) {
+						console.log(chalk.yellow(result.data.warning));
+					}
 					modelUpdated = true;
 				} else {
 					console.error(chalk.red(`❌ Error: ${result.error.message}`));
@@ -1917,10 +2039,18 @@ function registerCommands(programInstance) {
 			}
 			if (options.setFallback) {
 				const result = await setModel('fallback', options.setFallback, {
-					projectRoot
+					projectRoot,
+					providerHint: options.openrouter
+						? 'openrouter'
+						: options.ollama
+							? 'ollama'
+							: undefined
 				});
 				if (result.success) {
 					console.log(chalk.green(`✅ ${result.data.message}`));
+					if (result.data.warning) {
+						console.log(chalk.yellow(result.data.warning));
+					}
 					modelUpdated = true;
 				} else {
 					console.error(chalk.red(`❌ Error: ${result.error.message}`));
@@ -179,6 +179,39 @@
 			"max_tokens": 8700
 		}
 	],
+	"xai": [
+		{
+			"id": "grok-3",
+			"name": "Grok 3",
+			"swe_score": null,
+			"cost_per_1m_tokens": { "input": 3, "output": 15 },
+			"allowed_roles": ["main", "fallback", "research"],
+			"max_tokens": 131072
+		},
+		{
+			"id": "grok-3-mini",
+			"name": "Grok 3 Mini",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0.3, "output": 0.5 },
+			"allowed_roles": ["main", "fallback", "research"],
+			"max_tokens": 131072
+		},
+		{
+			"id": "grok-3-fast",
+			"name": "Grok 3 Fast",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 5, "output": 25 },
+			"allowed_roles": ["main", "fallback", "research"],
+			"max_tokens": 131072
+		},
+		{
+			"id": "grok-3-mini-fast",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0.6, "output": 4 },
+			"allowed_roles": ["main", "fallback", "research"],
+			"max_tokens": 131072
+		}
+	],
 	"ollama": [
 		{
 			"id": "gemma3:27b",
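All entries in the supported-models list share one shape; a sketch of the schema the lookups in setModel rely on (field meanings inferred from the entries in this diff, with "name" optional and "swe_score" nullable):

{
	"id": "provider-specific model ID",
	"name": "Optional display name",
	"swe_score": null,
	"cost_per_1m_tokens": { "input": 0, "output": 0 },
	"allowed_roles": ["main", "fallback", "research"],
	"max_tokens": 131072
}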
@@ -228,70 +261,205 @@
 			"id": "google/gemini-2.0-flash-001",
 			"swe_score": 0,
 			"cost_per_1m_tokens": { "input": 0.1, "output": 0.4 },
-			"allowed_roles": ["main", "fallback"]
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 1048576
 		},
 		{
 			"id": "google/gemini-2.5-pro-exp-03-25:free",
 			"swe_score": 0,
 			"cost_per_1m_tokens": { "input": 0, "output": 0 },
-			"allowed_roles": ["main", "fallback"]
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 1000000
 		},
 		{
 			"id": "deepseek/deepseek-chat-v3-0324:free",
 			"swe_score": 0,
 			"cost_per_1m_tokens": { "input": 0, "output": 0 },
 			"allowed_roles": ["main", "fallback"]
 		},
 		{
 			"id": "google/gemini-2.5-pro-preview-03-25",
 			"swe_score": 0,
 			"cost_per_1m_tokens": { "input": 1.25, "output": 10 },
-			"allowed_roles": ["main", "fallback"]
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 163840
 		},
 		{
 			"id": "deepseek/deepseek-chat-v3-0324",
 			"swe_score": 0,
 			"cost_per_1m_tokens": { "input": 0.27, "output": 1.1 },
-			"allowed_roles": ["main", "fallback"]
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 64000
 		},
 		{
 			"id": "deepseek/deepseek-r1:free",
 			"swe_score": 0,
 			"cost_per_1m_tokens": { "input": 0, "output": 0 },
-			"allowed_roles": ["main", "fallback"]
-		}
-	],
-	"xai": [
-		{
-			"id": "grok-3",
-			"name": "Grok 3",
-			"swe_score": null,
-			"cost_per_1m_tokens": { "input": 3, "output": 15 },
-			"allowed_roles": ["main", "fallback", "research"],
-			"max_tokens": 131072
-		},
-		{
-			"id": "grok-3-mini",
-			"name": "Grok 3 Mini",
-			"swe_score": 0,
-			"cost_per_1m_tokens": { "input": 0.3, "output": 0.5 },
-			"allowed_roles": ["main", "fallback", "research"],
-			"max_tokens": 131072
-		},
-		{
-			"id": "grok3-fast",
-			"name": "Grok 3 Fast",
-			"swe_score": 0,
-			"cost_per_1m_tokens": { "input": 5, "output": 25 },
-			"allowed_roles": ["main", "fallback", "research"],
-			"max_tokens": 131072
-		},
-		{
-			"id": "grok-3-mini-fast",
-			"swe_score": 0,
-			"cost_per_1m_tokens": { "input": 0.6, "output": 4 },
-			"allowed_roles": ["main", "fallback", "research"],
-			"max_tokens": 131072
-		}
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 163840
+		},
+		{
+			"id": "microsoft/mai-ds-r1:free",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0, "output": 0 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 163840
+		},
+		{
+			"id": "google/gemini-2.5-pro-preview-03-25",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 1.25, "output": 10 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 65535
+		},
+		{
+			"id": "google/gemini-2.5-flash-preview",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 65535
+		},
+		{
+			"id": "google/gemini-2.5-flash-preview:thinking",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0.15, "output": 3.5 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 65535
+		},
+		{
+			"id": "openai/o3",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 10, "output": 40 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 200000
+		},
+		{
+			"id": "openai/o4-mini",
+			"swe_score": 0.45,
+			"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 100000
+		},
+		{
+			"id": "openai/o4-mini-high",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 100000
+		},
+		{
+			"id": "openai/o1-pro",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 150, "output": 600 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 100000
+		},
+		{
+			"id": "meta-llama/llama-3.3-70b-instruct",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 120, "output": 600 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 1048576
+		},
+		{
+			"id": "meta-llama/llama-4-maverick:free",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0, "output": 0 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 256000
+		},
+		{
+			"id": "meta-llama/llama-4-maverick",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0.17, "output": 0.6 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 1048576
+		},
+		{
+			"id": "meta-llama/llama-4-scout:free",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0, "output": 0 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 512000
+		},
+		{
+			"id": "meta-llama/llama-4-scout",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0.08, "output": 0.3 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 1048576
+		},
+		{
+			"id": "google/gemma-3-12b-it:free",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0, "output": 0 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 131072
+		},
+		{
+			"id": "google/gemma-3-12b-it",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 50, "output": 100 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 131072
+		},
+		{
+			"id": "google/gemma-3-27b-it:free",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0, "output": 0 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 96000
+		},
+		{
+			"id": "google/gemma-3-27b-it",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 100, "output": 200 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 131072
+		},
+		{
+			"id": "qwen/qwq-32b:free",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0, "output": 0 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 40000
+		},
+		{
+			"id": "qwen/qwq-32b",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 150, "output": 200 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 131072
+		},
+		{
+			"id": "qwen/qwen-max",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 1.6, "output": 6.4 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 32768
+		},
+		{
+			"id": "qwen/qwen-turbo",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0.05, "output": 0.2 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 1000000
+		},
+		{
+			"id": "mistralai/mistral-small-3.1-24b-instruct:free",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0, "output": 0 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 96000
+		},
+		{
+			"id": "mistralai/mistral-small-3.1-24b-instruct",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0.1, "output": 0.3 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 128000
+		},
+		{
+			"id": "thudm/glm-4-32b:free",
+			"swe_score": 0,
+			"cost_per_1m_tokens": { "input": 0, "output": 0 },
+			"allowed_roles": ["main", "fallback"],
+			"max_tokens": 32768
+		}
 	]
 }
@@ -5,6 +5,7 @@
 
 import path from 'path';
 import fs from 'fs';
+import https from 'https';
 import {
 	getMainModelId,
 	getResearchModelId,
@@ -21,6 +22,52 @@ import {
 	getAllProviders
 } from '../config-manager.js';
 
+/**
+ * Fetches the list of models from the OpenRouter API.
+ * @returns {Promise<Array|null>} A promise that resolves with the array of model objects, or null if the fetch fails.
+ */
+function fetchOpenRouterModels() {
+	return new Promise((resolve) => {
+		const options = {
+			hostname: 'openrouter.ai',
+			path: '/api/v1/models',
+			method: 'GET',
+			headers: {
+				Accept: 'application/json'
+			}
+		};
+
+		const req = https.request(options, (res) => {
+			let data = '';
+			res.on('data', (chunk) => {
+				data += chunk;
+			});
+			res.on('end', () => {
+				if (res.statusCode === 200) {
+					try {
+						const parsedData = JSON.parse(data);
+						resolve(parsedData.data || []); // Return the array of models
+					} catch (e) {
+						console.error('Error parsing OpenRouter response:', e);
+						resolve(null); // Indicate failure
+					}
+				} else {
+					console.error(
+						`OpenRouter API request failed with status code: ${res.statusCode}`
+					);
+					resolve(null); // Indicate failure
+				}
+			});
+		});
+
+		req.on('error', (e) => {
+			console.error('Error fetching OpenRouter models:', e);
+			resolve(null); // Indicate failure
+		});
+		req.end();
+	});
+}
+
 /**
  * Get the current model configuration
  * @param {Object} [options] - Options for the operation
@@ -256,13 +303,14 @@ async function getAvailableModelsList(options = {}) {
  * @param {string} role - The model role to update ('main', 'research', 'fallback')
  * @param {string} modelId - The model ID to set for the role
  * @param {Object} [options] - Options for the operation
+ * @param {string} [options.providerHint] - Provider hint if already determined ('openrouter' or 'ollama')
  * @param {Object} [options.session] - Session object containing environment variables (for MCP)
  * @param {Function} [options.mcpLog] - MCP logger object (for MCP)
  * @param {string} [options.projectRoot] - Project root directory
  * @returns {Object} RESTful response with result of update operation
  */
 async function setModel(role, modelId, options = {}) {
-	const { mcpLog, projectRoot } = options;
+	const { mcpLog, projectRoot, providerHint } = options;
 
 	const report = (level, ...args) => {
 		if (mcpLog && typeof mcpLog[level] === 'function') {
@@ -325,15 +373,85 @@ async function setModel(role, modelId, options = {}) {
 	try {
 		const availableModels = getAvailableModels(projectRoot);
 		const currentConfig = getConfig(projectRoot);
+		let determinedProvider = null; // Initialize provider
+		let warningMessage = null;
 
-		// Find the model data
-		const modelData = availableModels.find((m) => m.id === modelId);
-		if (!modelData || !modelData.provider) {
+		// Find the model data in internal list initially to see if it exists at all
+		let modelData = availableModels.find((m) => m.id === modelId);
+
+		// --- Revised Logic: Prioritize providerHint --- //
+
+		if (providerHint) {
+			// Hint provided (--ollama or --openrouter flag used)
+			if (modelData && modelData.provider === providerHint) {
+				// Found internally AND provider matches the hint
+				determinedProvider = providerHint;
+				report(
+					'info',
+					`Model ${modelId} found internally with matching provider hint ${determinedProvider}.`
+				);
+			} else {
+				// Either not found internally, OR found but under a DIFFERENT provider than hinted.
+				// Proceed with custom logic based ONLY on the hint.
+				if (providerHint === 'openrouter') {
+					// Check OpenRouter ONLY because hint was openrouter
+					report('info', `Checking OpenRouter for ${modelId} (as hinted)...`);
+					const openRouterModels = await fetchOpenRouterModels();
+
+					if (
+						openRouterModels &&
+						openRouterModels.some((m) => m.id === modelId)
+					) {
+						determinedProvider = 'openrouter';
+						warningMessage = `Warning: Custom OpenRouter model '${modelId}' set. This model is not officially validated by Taskmaster and may not function as expected.`;
+						report('warn', warningMessage);
+					} else {
+						// Hinted as OpenRouter but not found in live check
+						throw new Error(
+							`Model ID "${modelId}" not found in the live OpenRouter model list. Please verify the ID and ensure it's available on OpenRouter.`
+						);
+					}
+				} else if (providerHint === 'ollama') {
+					// Hinted as Ollama - set provider directly WITHOUT checking OpenRouter
+					determinedProvider = 'ollama';
+					warningMessage = `Warning: Custom Ollama model '${modelId}' set. Ensure your Ollama server is running and has pulled this model. Taskmaster cannot guarantee compatibility.`;
+					report('warn', warningMessage);
+				} else {
+					// Invalid provider hint - should not happen
+					throw new Error(`Invalid provider hint received: ${providerHint}`);
+				}
+			}
+		} else {
+			// No hint provided (flags not used)
+			if (modelData) {
+				// Found internally, use the provider from the internal list
+				determinedProvider = modelData.provider;
+				report(
+					'info',
+					`Model ${modelId} found internally with provider ${determinedProvider}.`
+				);
+			} else {
+				// Model not found and no provider hint was given
+				return {
+					success: false,
+					error: {
+						code: 'MODEL_NOT_FOUND_NO_HINT',
+						message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter or --ollama.`
+					}
+				};
+			}
+		}
+
+		// --- End of Revised Logic --- //
+
+		// At this point, we should have a determinedProvider if the model is valid (internally or custom)
+		if (!determinedProvider) {
+			// This case acts as a safeguard
 			return {
 				success: false,
 				error: {
-					code: 'MODEL_NOT_FOUND',
-					message: `Model ID "${modelId}" not found or invalid in available models.`
+					code: 'PROVIDER_UNDETERMINED',
+					message: `Could not determine the provider for model ID "${modelId}".`
 				}
 			};
 		}
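The revised resolution logic, restated as a decision table (a summary of the branches above):

// providerHint    model found internally?      outcome
// 'openrouter'    yes, provider matches        use the internal entry
// 'openrouter'    no, or different provider    live check against /api/v1/models; throw if absent, else set with warning
// 'ollama'        yes, provider matches        use the internal entry
// 'ollama'        no, or different provider    set directly, no live check, strong warning
// (none)          yes                          use the internal provider
// (none)          no                           return MODEL_NOT_FOUND_NO_HINT error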
@@ -341,7 +459,7 @@ async function setModel(role, modelId, options = {}) {
 		// Update configuration
 		currentConfig.models[role] = {
 			...currentConfig.models[role], // Keep existing params like maxTokens
-			provider: modelData.provider,
+			provider: determinedProvider,
 			modelId: modelId
 		};
 
@@ -357,18 +475,17 @@ async function setModel(role, modelId, options = {}) {
 			};
 		}
 
-		report(
-			'info',
-			`Set ${role} model to: ${modelId} (Provider: ${modelData.provider})`
-		);
+		const successMessage = `Successfully set ${role} model to ${modelId} (Provider: ${determinedProvider})`;
+		report('info', successMessage);
 
 		return {
 			success: true,
 			data: {
 				role,
-				provider: modelData.provider,
+				provider: determinedProvider,
 				modelId,
-				message: `Successfully set ${role} model to ${modelId} (Provider: ${modelData.provider})`
+				message: successMessage,
+				warning: warningMessage // Include warning in the response data
 			}
 		};
 	} catch (error) {
@@ -2007,6 +2007,10 @@ function displayAvailableModels(availableModels) {
 			'\n' +
 			chalk.cyan(
 				`4. Run interactive setup: ${chalk.yellow('task-master models --setup')}`
-			),
+			) +
+			'\n' +
+			chalk.cyan(
+				`5. Use custom models: ${chalk.yellow('task-master models --custom --set-main|research|fallback <model_id>')}`
+			),
 		{
 			padding: 1,
@@ -1779,13 +1779,13 @@
 ### Details:
 
 
-## 28. Implement `openrouter.js` Provider Module [pending]
+## 28. Implement `openrouter.js` Provider Module [in-progress]
 ### Dependencies: None
 ### Description: Create and implement the `openrouter.js` module within `src/ai-providers/`. This module should contain functions to interact with various models via OpenRouter using the **`@openrouter/ai-sdk-provider` library**, adhering to the standardized input/output format defined for `ai-services-unified.js`. Note the specific library used.
 ### Details:
 
 
-## 29. Implement `xai.js` Provider Module using Vercel AI SDK [in-progress]
+## 29. Implement `xai.js` Provider Module using Vercel AI SDK [done]
 ### Dependencies: None
 ### Description: Create and implement the `xai.js` module within `src/ai-providers/`. This module should contain functions to interact with xAI models (e.g., Grok) using the **Vercel AI SDK (`@ai-sdk/xai`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.
 ### Details:
 
tasks/task_073.txt (new file, 44 lines)
@@ -0,0 +1,44 @@
+# Task ID: 73
+# Title: Implement Custom Model ID Support for Ollama/OpenRouter
+# Status: in-progress
+# Dependencies: None
+# Priority: medium
+# Description: Allow users to specify custom model IDs for Ollama and OpenRouter providers via CLI flag and interactive setup, with appropriate validation and warnings.
+# Details:
+**CLI (`task-master models --set-<role> <id> --custom`):**
+- Modify `scripts/modules/task-manager/models.js`: `setModel` function.
+- Check internal `available_models.json` first.
+- If not found and `--custom` is provided:
+ - Fetch `https://openrouter.ai/api/v1/models`. (Need to add `https` import).
+ - If ID found in OpenRouter list: Set `provider: 'openrouter'`, `modelId: <id>`. Warn user about lack of official validation.
+ - If ID not found in OpenRouter: Assume Ollama. Set `provider: 'ollama'`, `modelId: <id>`. Warn user strongly (model must be pulled, compatibility not guaranteed).
+- If not found and `--custom` is *not* provided: Fail with error message guiding user to use `--custom`.
+
+**Interactive Setup (`task-master models --setup`):**
+- Modify `scripts/modules/commands.js`: `runInteractiveSetup` function.
+- Add options to `inquirer` choices for each role: `OpenRouter (Enter Custom ID)` and `Ollama (Enter Custom ID)`.
+- If `__CUSTOM_OPENROUTER__` selected:
+ - Prompt for custom ID.
+ - Fetch OpenRouter list and validate ID exists. Fail setup for that role if not found.
+ - Update config and show warning if found.
+- If `__CUSTOM_OLLAMA__` selected:
+ - Prompt for custom ID.
+ - Update config directly (no live validation).
+ - Show strong Ollama warning.
+
+# Test Strategy:
+**Unit Tests:**
+- Test `setModel` logic for internal models, custom OpenRouter (valid/invalid), custom Ollama, missing `--custom` flag.
+- Test `runInteractiveSetup` for new custom options flow, including OpenRouter validation success/failure.
+
+**Integration Tests:**
+- Test the `task-master models` command with `--custom` flag variations.
+- Test the `task-master models --setup` interactive flow for custom options.
+
+**Manual Testing:**
+- Run `task-master models --setup` and select custom options.
+- Run `task-master models --set-main <valid_openrouter_id> --custom`. Verify config and warning.
+- Run `task-master models --set-main <invalid_openrouter_id> --custom`. Verify error.
+- Run `task-master models --set-main <ollama_model_id> --custom`. Verify config and warning.
+- Run `task-master models --set-main <custom_id>` (without `--custom`). Verify error.
+- Check `getModelConfiguration` output reflects custom models correctly.
@@ -3288,7 +3288,7 @@
 			"title": "Implement `openrouter.js` Provider Module",
 			"description": "Create and implement the `openrouter.js` module within `src/ai-providers/`. This module should contain functions to interact with various models via OpenRouter using the **`@openrouter/ai-sdk-provider` library**, adhering to the standardized input/output format defined for `ai-services-unified.js`. Note the specific library used.",
 			"details": "",
-			"status": "pending",
+			"status": "in-progress",
 			"dependencies": [],
 			"parentTaskId": 61
 		},
@@ -3297,7 +3297,7 @@
 			"title": "Implement `xai.js` Provider Module using Vercel AI SDK",
 			"description": "Create and implement the `xai.js` module within `src/ai-providers/`. This module should contain functions to interact with xAI models (e.g., Grok) using the **Vercel AI SDK (`@ai-sdk/xai`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.",
 			"details": "",
-			"status": "in-progress",
+			"status": "done",
 			"dependencies": [],
 			"parentTaskId": 61
 		},
@@ -3909,6 +3909,17 @@
 			"dependencies": [],
 			"priority": "medium",
 			"subtasks": []
 		},
+		{
+			"id": 73,
+			"title": "Implement Custom Model ID Support for Ollama/OpenRouter",
+			"description": "Allow users to specify custom model IDs for Ollama and OpenRouter providers via CLI flag and interactive setup, with appropriate validation and warnings.",
+			"details": "**CLI (`task-master models --set-<role> <id> --custom`):**\n- Modify `scripts/modules/task-manager/models.js`: `setModel` function.\n- Check internal `available_models.json` first.\n- If not found and `--custom` is provided:\n - Fetch `https://openrouter.ai/api/v1/models`. (Need to add `https` import).\n - If ID found in OpenRouter list: Set `provider: 'openrouter'`, `modelId: <id>`. Warn user about lack of official validation.\n - If ID not found in OpenRouter: Assume Ollama. Set `provider: 'ollama'`, `modelId: <id>`. Warn user strongly (model must be pulled, compatibility not guaranteed).\n- If not found and `--custom` is *not* provided: Fail with error message guiding user to use `--custom`.\n\n**Interactive Setup (`task-master models --setup`):**\n- Modify `scripts/modules/commands.js`: `runInteractiveSetup` function.\n- Add options to `inquirer` choices for each role: `OpenRouter (Enter Custom ID)` and `Ollama (Enter Custom ID)`.\n- If `__CUSTOM_OPENROUTER__` selected:\n - Prompt for custom ID.\n - Fetch OpenRouter list and validate ID exists. Fail setup for that role if not found.\n - Update config and show warning if found.\n- If `__CUSTOM_OLLAMA__` selected:\n - Prompt for custom ID.\n - Update config directly (no live validation).\n - Show strong Ollama warning.",
+			"testStrategy": "**Unit Tests:**\n- Test `setModel` logic for internal models, custom OpenRouter (valid/invalid), custom Ollama, missing `--custom` flag.\n- Test `runInteractiveSetup` for new custom options flow, including OpenRouter validation success/failure.\n\n**Integration Tests:**\n- Test the `task-master models` command with `--custom` flag variations.\n- Test the `task-master models --setup` interactive flow for custom options.\n\n**Manual Testing:**\n- Run `task-master models --setup` and select custom options.\n- Run `task-master models --set-main <valid_openrouter_id> --custom`. Verify config and warning.\n- Run `task-master models --set-main <invalid_openrouter_id> --custom`. Verify error.\n- Run `task-master models --set-main <ollama_model_id> --custom`. Verify config and warning.\n- Run `task-master models --set-main <custom_id>` (without `--custom`). Verify error.\n- Check `getModelConfiguration` output reflects custom models correctly.",
+			"status": "in-progress",
+			"dependencies": [],
+			"priority": "medium",
+			"subtasks": []
+		}
 	]
 }