feat(config): Add Fallback Model and Expanded Provider Support

Introduces a configurable fallback model and adds support for additional AI provider API keys in the environment setup.

- **Add Fallback Model Configuration (.taskmasterconfig):**
  - Implemented a new `fallback` section in `.taskmasterconfig`.
  - Configured `claude-3-7-sonnet-20250219` (Anthropic) as the default fallback model, enhancing resilience if the primary model fails.

- **Update Default Model Configuration (.taskmasterconfig):**
  - Changed the default `main` model to `gemini-2.5-pro-latest` (Google).
  - Changed the default `research` model to `deep-research` (Perplexity).

- **Add API Key Examples (assets/env.example):**
  - Added example environment variables for:
    - `OPENAI_API_KEY` (for OpenAI/OpenRouter)
    - `GOOGLE_API_KEY` (for Google Gemini)
    - `GROK_API_KEY` (for XAI Grok)
  - Included format comments for clarity.
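For reference, the resulting `.taskmasterconfig` model block after this change (taken directly from the `.taskmasterconfig` diff below):

```json
{
  "models": {
    "main": { "provider": "google", "modelId": "gemini-2.5-pro-latest" },
    "research": { "provider": "perplexity", "modelId": "deep-research" },
    "fallback": { "provider": "anthropic", "modelId": "claude-3-7-sonnet-20250219" }
  }
}
```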
Eyal Toledano
2025-04-16 00:35:30 -04:00
parent 147c41daef
commit 81d5187f9e
12 changed files with 1638 additions and 68 deletions


@@ -0,0 +1,5 @@
---
'task-master-ai': patch
---
Adds model management and a new configuration file, `.taskmasterconfig`, which houses the models used for main, research, and fallback. Adds a `models` command and setter flags, plus a `--setup` flag with an interactive setup (we should be calling this during init). Shows a table of active and available models when `models` is called without flags, including SWE scores and token costs, which are manually entered into `supported-models.json`, the new place where supported models are defined. `config-manager.js` is the core module responsible for managing the new config.
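A typical session with the new command looks like this (the model IDs shown are real entries from `supported-models.json`; any supported ID works):

```shell
# Show a table of active and available models (SWE scores, costs, key status)
task-master models

# Set models per role
task-master models --set-main gpt-4o
task-master models --set-research deep-research
task-master models --set-fallback claude-3-7-sonnet-20250219

# Run the interactive setup
task-master models --setup
```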


@@ -1,12 +1,16 @@
{
  "models": {
    "main": {
-      "provider": "openai",
-      "modelId": "gpt-4o"
+      "provider": "google",
+      "modelId": "gemini-2.5-pro-latest"
    },
    "research": {
-      "provider": "google",
-      "modelId": "gemini-1.5-pro-latest"
+      "provider": "perplexity",
+      "modelId": "deep-research"
+    },
+    "fallback": {
+      "provider": "anthropic",
+      "modelId": "claude-3-7-sonnet-20250219"
    }
  }
}


@@ -1,6 +1,9 @@
# Required
ANTHROPIC_API_KEY=your-api-key-here # For most AI ops -- Format: sk-ant-api03-... (Required)
PERPLEXITY_API_KEY=pplx-abcde # For research -- Format: pplx-abcde (Optional, Highly Recommended)
+OPENAI_API_KEY=sk-proj-... # For OpenAI/OpenRouter models (Optional) -- Format: sk-proj-...
+GOOGLE_API_KEY=AIzaSy... # For Google Gemini models (Optional)
+GROK_API_KEY=your-grok-api-key-here # For XAI Grok models (Optional)

# Optional - defaults shown
MODEL=claude-3-7-sonnet-20250219 # Recommended models: claude-3-7-sonnet-20250219, claude-3-opus-20240229 (Required)
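These variables are detected with simple presence checks on `process.env`. A minimal sketch mirroring the `hasApiKeyForProvider` helper added later in this commit (the `keyEnvVars` lookup table here is an illustrative condensation, not code from the commit):

```javascript
// Map each provider to the env var that must be set and non-empty.
// OpenRouter piggybacks on OPENAI_API_KEY; Ollama needs no cloud key.
const keyEnvVars = {
	anthropic: 'ANTHROPIC_API_KEY',
	perplexity: 'PERPLEXITY_API_KEY',
	openai: 'OPENAI_API_KEY',
	openrouter: 'OPENAI_API_KEY',
	google: 'GOOGLE_API_KEY',
	grok: 'GROK_API_KEY'
};

function hasKey(provider) {
	if (provider === 'ollama') return true; // runs locally, no API key required
	const envVar = keyEnvVars[provider];
	return Boolean(envVar && process.env[envVar]);
}
```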


@@ -11,6 +11,7 @@ import fs from 'fs';
import https from 'https';
import inquirer from 'inquirer';
import ora from 'ora';
+import Table from 'cli-table3';
import { CONFIG, log, readJSON, writeJSON } from './utils.js';
import {
@@ -40,6 +41,22 @@ import {
fixDependenciesCommand
} from './dependency-manager.js';
import {
getMainModelId,
getResearchModelId,
getFallbackModelId,
setMainModel,
setResearchModel,
setFallbackModel,
getAvailableModels,
VALID_PROVIDERS,
getMainProvider,
getResearchProvider,
getFallbackProvider,
hasApiKeyForProvider,
getMcpApiKeyStatus
} from './config-manager.js';
import {
displayBanner,
displayHelp,
@@ -1548,7 +1565,527 @@ function registerCommands(programInstance) {
}
});
-// Add more commands as needed...
+// models command
programInstance
.command('models')
.description('Manage AI model configurations')
.option(
'--set-main <model_id>',
'Set the primary model for task generation/updates'
)
.option(
'--set-research <model_id>',
'Set the model for research-backed operations'
)
.option(
'--set-fallback <model_id>',
'Set the model to use if the primary fails'
)
.option('--setup', 'Run interactive setup to configure models')
.action(async (options) => {
let modelSetAction = false; // Track if any set action was performed
const availableModels = getAvailableModels(); // Get available models once
// Helper to find provider for a given model ID
const findProvider = (modelId) => {
const modelInfo = availableModels.find((m) => m.id === modelId);
return modelInfo?.provider;
};
try {
if (options.setMain) {
const modelId = options.setMain;
if (typeof modelId !== 'string' || modelId.trim() === '') {
console.error(
chalk.red('Error: --set-main flag requires a valid model ID.')
);
process.exit(1);
}
const provider = findProvider(modelId);
if (!provider) {
console.error(
chalk.red(
`Error: Model ID "${modelId}" not found in available models.`
)
);
process.exit(1);
}
if (setMainModel(provider, modelId)) {
// Call specific setter
console.log(
chalk.green(
`Main model set to: ${modelId} (Provider: ${provider})`
)
);
modelSetAction = true;
} else {
console.error(chalk.red(`Failed to set main model.`));
process.exit(1);
}
}
if (options.setResearch) {
const modelId = options.setResearch;
if (typeof modelId !== 'string' || modelId.trim() === '') {
console.error(
chalk.red('Error: --set-research flag requires a valid model ID.')
);
process.exit(1);
}
const provider = findProvider(modelId);
if (!provider) {
console.error(
chalk.red(
`Error: Model ID "${modelId}" not found in available models.`
)
);
process.exit(1);
}
if (setResearchModel(provider, modelId)) {
// Call specific setter
console.log(
chalk.green(
`Research model set to: ${modelId} (Provider: ${provider})`
)
);
modelSetAction = true;
} else {
console.error(chalk.red(`Failed to set research model.`));
process.exit(1);
}
}
if (options.setFallback) {
const modelId = options.setFallback;
if (typeof modelId !== 'string' || modelId.trim() === '') {
console.error(
chalk.red('Error: --set-fallback flag requires a valid model ID.')
);
process.exit(1);
}
const provider = findProvider(modelId);
if (!provider) {
console.error(
chalk.red(
`Error: Model ID "${modelId}" not found in available models.`
)
);
process.exit(1);
}
if (setFallbackModel(provider, modelId)) {
// Call specific setter
console.log(
chalk.green(
`Fallback model set to: ${modelId} (Provider: ${provider})`
)
);
modelSetAction = true;
} else {
console.error(chalk.red(`Failed to set fallback model.`));
process.exit(1);
}
}
// Handle interactive setup first
if (options.setup) {
console.log(chalk.cyan.bold('\nInteractive Model Setup:'));
// Filter out placeholder models for selection
const selectableModels = availableModels
.filter(
(model) => !(model.id.startsWith('[') && model.id.endsWith(']'))
)
.map((model) => ({
name: `${model.provider} / ${model.id}`,
value: { provider: model.provider, id: model.id }
}));
if (selectableModels.length === 0) {
console.error(
chalk.red('Error: No selectable models found in configuration.')
);
process.exit(1);
}
const answers = await inquirer.prompt([
{
type: 'list',
name: 'mainModel',
message: 'Select the main model for generation/updates:',
choices: selectableModels,
default: selectableModels.findIndex(
(m) => m.value.id === getMainModelId()
)
},
{
type: 'list',
name: 'researchModel',
message: 'Select the research model:',
// Filter choices to only include models allowed for research
choices: selectableModels.filter((modelChoice) => {
// Need to find the original model data to check allowed_roles
const originalModel = availableModels.find(
(m) => m.id === modelChoice.value.id
);
return originalModel?.allowed_roles?.includes('research');
}),
default: selectableModels.findIndex(
(m) => m.value.id === getResearchModelId()
)
},
{
type: 'list',
name: 'fallbackModel',
message: 'Select the fallback model (optional):',
choices: [
{ name: 'None (disable fallback)', value: null },
new inquirer.Separator(),
...selectableModels
],
default:
selectableModels.findIndex(
(m) => m.value.id === getFallbackModelId()
) + 2 // Adjust for separator and None
}
]);
let setupSuccess = true;
// Set Main Model
if (answers.mainModel) {
if (
!setMainModel(answers.mainModel.provider, answers.mainModel.id)
) {
console.error(chalk.red('Failed to set main model.'));
setupSuccess = false;
} else {
// Success message printed by setMainModel
}
}
// Set Research Model
if (answers.researchModel) {
if (
!setResearchModel(
answers.researchModel.provider,
answers.researchModel.id
)
) {
console.error(chalk.red('Failed to set research model.'));
setupSuccess = false;
} else {
// Success message printed by setResearchModel
}
}
// Set Fallback Model
if (answers.fallbackModel) {
if (
!setFallbackModel(
answers.fallbackModel.provider,
answers.fallbackModel.id
)
) {
console.error(chalk.red('Failed to set fallback model.'));
setupSuccess = false;
} else {
console.log(
chalk.green(
`Fallback model set to: ${answers.fallbackModel.provider} / ${answers.fallbackModel.id}`
)
);
}
} else {
// User selected None - attempt to remove fallback from config
const config = readConfig();
if (config.models.fallback) {
delete config.models.fallback;
if (!writeConfig(config)) {
console.error(
chalk.red('Failed to remove fallback model configuration.')
);
setupSuccess = false;
} else {
console.log(chalk.green('Fallback model disabled.'));
}
}
}
if (setupSuccess) {
console.log(chalk.green.bold('\nModel setup complete!'));
}
return; // Exit after setup
}
// If no set flags were used and not in setup mode, list the models
if (!modelSetAction && !options.setup) {
// Fetch current settings
const mainProvider = getMainProvider();
const mainModelId = getMainModelId();
const researchProvider = getResearchProvider();
const researchModelId = getResearchModelId();
const fallbackProvider = getFallbackProvider(); // May be undefined
const fallbackModelId = getFallbackModelId(); // May be undefined
// Check API keys for both CLI (.env) and MCP (mcp.json)
const mainCliKeyOk = hasApiKeyForProvider(mainProvider);
const mainMcpKeyOk = getMcpApiKeyStatus(mainProvider);
const researchCliKeyOk = hasApiKeyForProvider(researchProvider);
const researchMcpKeyOk = getMcpApiKeyStatus(researchProvider);
const fallbackCliKeyOk = fallbackProvider
? hasApiKeyForProvider(fallbackProvider)
: true; // No key needed if no fallback is set
const fallbackMcpKeyOk = fallbackProvider
? getMcpApiKeyStatus(fallbackProvider)
: true; // No key needed if no fallback is set
// --- Generate Warning Messages ---
const warnings = [];
if (!mainCliKeyOk || !mainMcpKeyOk) {
warnings.push(
`Main model (${mainProvider}): API key missing for ${!mainCliKeyOk ? 'CLI (.env)' : ''}${!mainCliKeyOk && !mainMcpKeyOk ? ' / ' : ''}${!mainMcpKeyOk ? 'MCP (.cursor/mcp.json)' : ''}`
);
}
if (!researchCliKeyOk || !researchMcpKeyOk) {
warnings.push(
`Research model (${researchProvider}): API key missing for ${!researchCliKeyOk ? 'CLI (.env)' : ''}${!researchCliKeyOk && !researchMcpKeyOk ? ' / ' : ''}${!researchMcpKeyOk ? 'MCP (.cursor/mcp.json)' : ''}`
);
}
if (fallbackProvider && (!fallbackCliKeyOk || !fallbackMcpKeyOk)) {
warnings.push(
`Fallback model (${fallbackProvider}): API key missing for ${!fallbackCliKeyOk ? 'CLI (.env)' : ''}${!fallbackCliKeyOk && !fallbackMcpKeyOk ? ' / ' : ''}${!fallbackMcpKeyOk ? 'MCP (.cursor/mcp.json)' : ''}`
);
}
// --- Display Warning Banner (if any) ---
if (warnings.length > 0) {
console.log(
boxen(
chalk.red.bold('API Key Warnings:') +
'\n\n' +
warnings.join('\n'),
{
padding: 1,
margin: { top: 1, bottom: 1 },
borderColor: 'red',
borderStyle: 'round'
}
)
);
}
// --- Active Configuration Section ---
console.log(chalk.cyan.bold('\nActive Model Configuration:'));
const activeTable = new Table({
head: [
'Role',
'Provider',
'Model ID',
'SWE Score', // Update column name
'Cost ($/1M tkns)', // Add Cost column
'API Key Status'
].map((h) => chalk.cyan.bold(h)),
colWidths: [10, 14, 30, 18, 20, 28], // Adjust widths for stars
style: { head: ['cyan', 'bold'] }
});
const allAvailableModels = getAvailableModels(); // Get all models once for lookup
// --- Calculate Tertile Thresholds for SWE Scores ---
const validScores = allAvailableModels
.map((m) => m.swe_score)
.filter((s) => s !== null && s !== undefined && s > 0);
const sortedScores = [...validScores].sort((a, b) => b - a); // Sort descending
const n = sortedScores.length;
let minScore3Stars = -Infinity;
let minScore2Stars = -Infinity;
if (n > 0) {
const topThirdIndex = Math.max(0, Math.floor(n / 3) - 1);
const midThirdIndex = Math.max(0, Math.floor((2 * n) / 3) - 1);
minScore3Stars = sortedScores[topThirdIndex];
minScore2Stars = sortedScores[midThirdIndex];
}
// Helper to find the full model object
const findModelData = (modelId) => {
return allAvailableModels.find((m) => m.id === modelId);
};
// --- Helper to format SWE score and add tertile stars ---
const formatSweScoreWithTertileStars = (score) => {
if (score === null || score === undefined || score <= 0)
return 'N/A'; // Handle non-positive scores
const formattedPercentage = `${(score * 100).toFixed(1)}%`;
let stars = '';
if (n === 0) {
// No valid scores to compare against
stars = chalk.gray('☆☆☆');
} else if (score >= minScore3Stars) {
stars = chalk.yellow('★★★'); // Top Third
} else if (score >= minScore2Stars) {
stars = chalk.yellow('★★') + chalk.gray('☆'); // Middle Third
} else {
stars = chalk.yellow('★') + chalk.gray('☆☆'); // Bottom Third (but > 0)
}
return `${formattedPercentage} ${stars}`;
};
// Helper to format cost
const formatCost = (costObj) => {
if (!costObj) return 'N/A';
const formatSingleCost = (costValue) => {
if (costValue === null || costValue === undefined) return 'N/A';
// Check if the number is an integer
const isInteger = Number.isInteger(costValue);
return `$${costValue.toFixed(isInteger ? 0 : 2)}`;
};
const inputCost = formatSingleCost(costObj.input);
const outputCost = formatSingleCost(costObj.output);
return `${inputCost} in, ${outputCost} out`; // Use cleaner separator
};
const getCombinedStatus = (cliOk, mcpOk) => {
const cliSymbol = cliOk ? chalk.green('✓') : chalk.red('✗');
const mcpSymbol = mcpOk ? chalk.green('✓') : chalk.red('✗');
if (cliOk && mcpOk) {
// Both symbols green, default text color
return `${cliSymbol} CLI & ${mcpSymbol} MCP OK`;
} else if (cliOk && !mcpOk) {
// Symbols colored individually, default text color
return `${cliSymbol} CLI OK / ${mcpSymbol} MCP Missing`;
} else if (!cliOk && mcpOk) {
// Symbols colored individually, default text color
return `${cliSymbol} CLI Missing / ${mcpSymbol} MCP OK`;
} else {
// Both symbols gray, apply overall gray to text as well
return chalk.gray(`${cliSymbol} CLI & MCP Both Missing`);
}
};
const mainModelData = findModelData(mainModelId);
const researchModelData = findModelData(researchModelId);
const fallbackModelData = findModelData(fallbackModelId);
activeTable.push([
chalk.white('Main'),
mainProvider,
mainModelId,
formatSweScoreWithTertileStars(mainModelData?.swe_score), // Use tertile formatter
formatCost(mainModelData?.cost_per_1m_tokens),
getCombinedStatus(mainCliKeyOk, mainMcpKeyOk)
]);
activeTable.push([
chalk.white('Research'),
researchProvider,
researchModelId,
formatSweScoreWithTertileStars(researchModelData?.swe_score), // Use tertile formatter
formatCost(researchModelData?.cost_per_1m_tokens),
getCombinedStatus(researchCliKeyOk, researchMcpKeyOk)
]);
if (fallbackProvider && fallbackModelId) {
activeTable.push([
chalk.white('Fallback'),
fallbackProvider,
fallbackModelId,
formatSweScoreWithTertileStars(fallbackModelData?.swe_score), // Use tertile formatter
formatCost(fallbackModelData?.cost_per_1m_tokens),
getCombinedStatus(fallbackCliKeyOk, fallbackMcpKeyOk)
]);
}
console.log(activeTable.toString());
// --- Available Models Section ---
// const availableModels = getAvailableModels(); // Already fetched
if (!allAvailableModels || allAvailableModels.length === 0) {
console.log(chalk.yellow('\nNo available models defined.'));
return;
}
// Filter out placeholders and active models for the available list
const activeIds = [
mainModelId,
researchModelId,
fallbackModelId
].filter(Boolean);
const filteredAvailable = allAvailableModels.filter(
(model) =>
!(model.id.startsWith('[') && model.id.endsWith(']')) &&
!activeIds.includes(model.id)
);
if (filteredAvailable.length > 0) {
console.log(chalk.cyan.bold('\nOther Available Models:'));
const availableTable = new Table({
head: [
'Provider',
'Model ID',
'SWE Score', // Update column name
'Cost ($/1M tkns)' // Add Cost column
].map((h) => chalk.cyan.bold(h)),
colWidths: [15, 40, 18, 25], // Adjust widths for stars
style: { head: ['cyan', 'bold'] }
});
filteredAvailable.forEach((model) => {
availableTable.push([
model.provider || 'N/A',
model.id,
formatSweScoreWithTertileStars(model.swe_score), // Use tertile formatter
formatCost(model.cost_per_1m_tokens)
]);
});
console.log(availableTable.toString());
} else {
console.log(
chalk.gray('\n(All available models are currently configured)')
);
}
// --- Suggested Actions Section ---
console.log(
boxen(
chalk.white.bold('Next Steps:') +
'\n' +
chalk.cyan(
`1. Set main model: ${chalk.yellow('task-master models --set-main <model_id>')}`
) +
'\n' +
chalk.cyan(
`2. Set research model: ${chalk.yellow('task-master models --set-research <model_id>')}`
) +
'\n' +
chalk.cyan(
`3. Set fallback model: ${chalk.yellow('task-master models --set-fallback <model_id>')}`
) +
'\n' +
chalk.cyan(
`4. Run interactive setup: ${chalk.yellow('task-master models --setup')}`
),
{
padding: 1,
borderColor: 'yellow',
borderStyle: 'round',
margin: { top: 1 }
}
)
);
}
} catch (error) {
log(`Error processing models command: ${error.message}`, 'error');
if (error.stack && CONFIG.debug) {
log(error.stack, 'debug');
}
process.exit(1);
}
});
return programInstance;
}


@@ -1,6 +1,30 @@
import fs from 'fs';
import path from 'path';
import chalk from 'chalk';
import { fileURLToPath } from 'url';
// Calculate __dirname in ESM
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Load supported models from JSON file using the calculated __dirname
let MODEL_MAP;
try {
const supportedModelsRaw = fs.readFileSync(
path.join(__dirname, 'supported-models.json'),
'utf-8'
);
MODEL_MAP = JSON.parse(supportedModelsRaw);
} catch (error) {
console.error(
chalk.red(
'FATAL ERROR: Could not load supported-models.json. Please ensure the file exists and is valid JSON.'
),
error
);
MODEL_MAP = {}; // Default to empty map on error to avoid crashing, though functionality will be limited
process.exit(1); // Exit if models can't be loaded
}
const CONFIG_FILE_NAME = '.taskmasterconfig';
@@ -21,17 +45,6 @@ const VALID_PROVIDERS = [
'grok'
];
-// Optional: Define known models per provider primarily for informational display or non-blocking warnings
-const MODEL_MAP = {
-  anthropic: ['claude-3.5-sonnet-20240620', 'claude-3-7-sonnet-20250219'],
-  openai: ['gpt-4o', 'gpt-4-turbo'],
-  google: ['gemini-2.5-pro-latest', 'gemini-1.5-flash-latest'],
-  perplexity: ['sonar-pro', 'sonar-mini'],
-  ollama: [], // Users configure specific Ollama models locally
-  openrouter: [], // Users specify model string
-  grok: [] // Specify Grok model if known
-};
let projectRoot = null;
function findProjectRoot() {
@@ -106,11 +119,16 @@ function readConfig(explicitRoot = null) {
modelId:
parsedConfig?.models?.research?.modelId ??
defaults.models.research.modelId
},
// Add merge logic for the fallback model
fallback: {
provider: parsedConfig?.models?.fallback?.provider,
modelId: parsedConfig?.models?.fallback?.modelId
}
}
};
-// Validate loaded provider (no longer split by main/research)
+// Validate loaded providers (main, research, and fallback if it exists)
if (!validateProvider(config.models.main.provider)) {
console.warn(
chalk.yellow(
@@ -139,6 +157,21 @@ function readConfig(explicitRoot = null) {
// Optional: Add warning for model combination if desired, but don't block
// else if (!validateProviderModelCombination(config.models.research.provider, config.models.research.modelId)) { ... }
// Add validation for fallback provider if it exists
if (
config.models.fallback &&
config.models.fallback.provider &&
!validateProvider(config.models.fallback.provider)
) {
console.warn(
chalk.yellow(
`Warning: Invalid fallback provider "${config.models.fallback.provider}" in ${CONFIG_FILE_NAME}. Fallback model will be ignored.`
)
);
// Unlike main/research, we don't set a default fallback, just ignore it
delete config.models.fallback;
}
return config;
} catch (error) {
console.error(
@@ -177,7 +210,8 @@ function validateProviderModelCombination(providerName, modelId) {
// If the provider is known, check if the model is in its list OR if the list is empty (meaning accept any)
return (
MODEL_MAP[providerName].length === 0 ||
-MODEL_MAP[providerName].includes(modelId)
+// Use .some() to check the 'id' property of objects in the array
+MODEL_MAP[providerName].some((modelObj) => modelObj.id === modelId)
);
}
@@ -221,6 +255,26 @@ function getResearchModelId(explicitRoot = null) {
return config.models.research.modelId;
}
/**
* Gets the currently configured fallback AI provider.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {string|undefined} The name of the fallback provider, or undefined if not set.
*/
function getFallbackProvider(explicitRoot = null) {
const config = readConfig(explicitRoot);
return config.models?.fallback?.provider;
}
/**
* Gets the currently configured fallback AI model ID.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {string|undefined} The ID of the fallback model, or undefined if not set.
*/
function getFallbackModelId(explicitRoot = null) {
const config = readConfig(explicitRoot);
return config.models?.fallback?.modelId;
}
/**
 * Sets the main AI model (provider and modelId) in the configuration file.
 * @param {string} providerName The name of the provider to set.
@@ -229,6 +283,7 @@ function getResearchModelId(explicitRoot = null) {
 * @returns {boolean} True if successful, false otherwise.
 */
function setMainModel(providerName, modelId, explicitRoot = null) {
// --- 1. Validate Provider First ---
if (!validateProvider(providerName)) {
console.error(
chalk.red(`Error: "${providerName}" is not a valid provider.`)
@@ -238,6 +293,35 @@ function setMainModel(providerName, modelId, explicitRoot = null) {
);
return false;
}
// --- 2. Validate Role Second ---
const allModels = getAvailableModels(); // Get all models to check roles
const modelData = allModels.find(
(m) => m.id === modelId && m.provider === providerName
);
if (
!modelData ||
!modelData.allowed_roles ||
!modelData.allowed_roles.includes('main')
) {
console.error(
chalk.red(`Error: Model "${modelId}" is not allowed for the 'main' role.`)
);
// Try to suggest valid models for the role
const allowedMainModels = allModels
.filter((m) => m.allowed_roles?.includes('main'))
.map((m) => ` - ${m.provider} / ${m.id}`)
.join('\n');
if (allowedMainModels) {
console.log(
chalk.yellow('\nAllowed models for main role:\n' + allowedMainModels)
);
}
return false;
}
// --- 3. Validate Model Combination (Optional Warning) ---
if (!validateProviderModelCombination(providerName, modelId)) {
console.warn(
chalk.yellow(
@@ -246,7 +330,7 @@ function setMainModel(providerName, modelId, explicitRoot = null) {
);
}
-// Pass explicitRoot down
+// --- Proceed with setting ---
const config = readConfig(explicitRoot);
config.models.main = { provider: providerName, modelId: modelId };
// Pass explicitRoot down
@@ -268,6 +352,7 @@ function setMainModel(providerName, modelId, explicitRoot = null) {
 * @returns {boolean} True if successful, false otherwise.
 */
function setResearchModel(providerName, modelId, explicitRoot = null) {
// --- 1. Validate Provider First ---
if (!validateProvider(providerName)) {
console.error(
chalk.red(`Error: "${providerName}" is not a valid provider.`)
@@ -277,6 +362,39 @@ function setResearchModel(providerName, modelId, explicitRoot = null) {
);
return false;
}
// --- 2. Validate Role Second ---
const allModels = getAvailableModels(); // Get all models to check roles
const modelData = allModels.find(
(m) => m.id === modelId && m.provider === providerName
);
if (
!modelData ||
!modelData.allowed_roles ||
!modelData.allowed_roles.includes('research')
) {
console.error(
chalk.red(
`Error: Model "${modelId}" is not allowed for the 'research' role.`
)
);
// Try to suggest valid models for the role
const allowedResearchModels = allModels
.filter((m) => m.allowed_roles?.includes('research'))
.map((m) => ` - ${m.provider} / ${m.id}`)
.join('\n');
if (allowedResearchModels) {
console.log(
chalk.yellow(
'\nAllowed models for research role:\n' + allowedResearchModels
)
);
}
return false;
}
// --- 3. Validate Model Combination (Optional Warning) ---
if (!validateProviderModelCombination(providerName, modelId)) {
console.warn(
chalk.yellow(
@@ -284,6 +402,8 @@ function setResearchModel(providerName, modelId, explicitRoot = null) {
)
);
}
// --- 4. Specific Research Warning (Optional) ---
if (
providerName === 'anthropic' ||
(providerName === 'openai' && modelId.includes('3.5'))
@@ -295,7 +415,7 @@ function setResearchModel(providerName, modelId, explicitRoot = null) {
);
}
-// Pass explicitRoot down
+// --- Proceed with setting ---
const config = readConfig(explicitRoot);
config.models.research = { provider: providerName, modelId: modelId };
// Pass explicitRoot down
@@ -309,37 +429,257 @@ function setResearchModel(providerName, modelId, explicitRoot = null) {
}
}
/**
* Sets the fallback AI model (provider and modelId) in the configuration file.
* @param {string} providerName The name of the provider to set.
* @param {string} modelId The ID of the model to set.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {boolean} True if successful, false otherwise.
*/
function setFallbackModel(providerName, modelId, explicitRoot = null) {
// --- 1. Validate Provider First ---
if (!validateProvider(providerName)) {
console.error(
chalk.red(`Error: "${providerName}" is not a valid provider.`)
);
console.log(
chalk.yellow(`Available providers: ${VALID_PROVIDERS.join(', ')}`)
);
return false;
}
// --- 2. Validate Role Second ---
const allModels = getAvailableModels(); // Get all models to check roles
const modelData = allModels.find(
(m) => m.id === modelId && m.provider === providerName
);
if (
!modelData ||
!modelData.allowed_roles ||
!modelData.allowed_roles.includes('fallback')
) {
console.error(
chalk.red(
`Error: Model "${modelId}" is not allowed for the 'fallback' role.`
)
);
// Try to suggest valid models for the role
const allowedFallbackModels = allModels
.filter((m) => m.allowed_roles?.includes('fallback'))
.map((m) => ` - ${m.provider} / ${m.id}`)
.join('\n');
if (allowedFallbackModels) {
console.log(
chalk.yellow(
'\nAllowed models for fallback role:\n' + allowedFallbackModels
)
);
}
return false;
}
// --- 3. Validate Model Combination (Optional Warning) ---
if (!validateProviderModelCombination(providerName, modelId)) {
console.warn(
chalk.yellow(
`Warning: Model "${modelId}" is not in the known list for provider "${providerName}". Ensure it is valid.`
)
);
}
// --- Proceed with setting ---
const config = readConfig(explicitRoot);
if (!config.models) {
config.models = {}; // Ensure models object exists
}
// Ensure fallback object exists
if (!config.models.fallback) {
config.models.fallback = {};
}
config.models.fallback = { provider: providerName, modelId: modelId };
return writeConfig(config, explicitRoot);
}
/**
* Gets a list of available models based on the MODEL_MAP.
* @returns {Array<{id: string, name: string, provider: string, swe_score: number|null, cost_per_1m_tokens: {input: number|null, output: number|null}|null, allowed_roles: string[]}>}
*/
function getAvailableModels() {
const available = [];
for (const [provider, models] of Object.entries(MODEL_MAP)) {
if (models.length > 0) {
models.forEach((modelObj) => {
// Basic name generation - can be improved
const modelId = modelObj.id;
const sweScore = modelObj.swe_score;
const cost = modelObj.cost_per_1m_tokens;
const allowedRoles = modelObj.allowed_roles || ['main', 'fallback'];
const nameParts = modelId
.split('-')
.map((p) => p.charAt(0).toUpperCase() + p.slice(1));
// Handle specific known names better if needed
let name = nameParts.join(' ');
if (modelId === 'claude-3.5-sonnet-20240620')
name = 'Claude 3.5 Sonnet';
if (modelId === 'claude-3-7-sonnet-20250219')
name = 'Claude 3.7 Sonnet';
if (modelId === 'gpt-4o') name = 'GPT-4o';
if (modelId === 'gpt-4-turbo') name = 'GPT-4 Turbo';
if (modelId === 'sonar-pro') name = 'Perplexity Sonar Pro';
if (modelId === 'sonar-mini') name = 'Perplexity Sonar Mini';
available.push({
id: modelId,
name: name,
provider: provider,
swe_score: sweScore,
cost_per_1m_tokens: cost,
allowed_roles: allowedRoles
});
});
} else {
// For providers with empty lists (like ollama), maybe add a placeholder or skip
available.push({
id: `[${provider}-any]`,
name: `Any (${provider})`,
provider: provider
});
}
}
return available;
}
/**
* Writes the configuration object to the file.
* @param {Object} config The configuration object to write.
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {boolean} True if successful, false otherwise.
*/
function writeConfig(config, explicitRoot = null) {
-  // Determine the root path to use
-  const rootToUse = explicitRoot || findProjectRoot();
-  if (!rootToUse) {
+  const rootPath = explicitRoot || findProjectRoot();
+  if (!rootPath) {
    console.error(
      chalk.red(
-        'Error: Could not determine project root to write configuration.'
+        'Error: Could not determine project root. Configuration not saved.'
      )
    );
    return false;
  }
-  const configPath = path.join(rootToUse, CONFIG_FILE_NAME);
-  // Check if file exists, as expected by tests
-  if (!fs.existsSync(configPath)) {
-    console.error(
-      chalk.red(
-        `Error: ${CONFIG_FILE_NAME} does not exist. Create it first or initialize project.`
-      )
-    );
-    return false;
-  }
+  // Ensure we don't double-join if explicitRoot already contains the filename
+  const configPath =
+    path.basename(rootPath) === CONFIG_FILE_NAME
+      ? rootPath
+      : path.join(rootPath, CONFIG_FILE_NAME);
  try {
-    // Added 'utf-8' encoding
-    fs.writeFileSync(configPath, JSON.stringify(config, null, 2), 'utf-8');
+    fs.writeFileSync(configPath, JSON.stringify(config, null, 2));
    return true;
  } catch (error) {
    console.error(
-      chalk.red(`Error writing to ${configPath}: ${error.message}.`)
+      chalk.red(
+        `Error writing configuration to ${configPath}: ${error.message}`
+      )
    );
    return false;
  }
}
/**
* Checks if the required API key environment variable is set for a given provider.
* @param {string} providerName The name of the provider.
* @returns {boolean} True if the API key environment variable exists and is non-empty, false otherwise.
*/
function hasApiKeyForProvider(providerName) {
switch (providerName) {
case 'anthropic':
return !!process.env.ANTHROPIC_API_KEY;
case 'openai':
case 'openrouter': // OpenRouter uses OpenAI-compatible key
return !!process.env.OPENAI_API_KEY;
case 'google':
return !!process.env.GOOGLE_API_KEY;
case 'perplexity':
return !!process.env.PERPLEXITY_API_KEY;
case 'grok':
case 'xai': // Added alias for Grok
return !!process.env.GROK_API_KEY;
case 'ollama':
return true; // Ollama runs locally, no cloud API key needed
default:
return false; // Unknown provider cannot have a key checked
}
}
/**
* Checks the API key status within .cursor/mcp.json for a given provider.
* Reads the mcp.json file, finds the taskmaster-ai server config, and checks the relevant env var.
* @param {string} providerName The name of the provider.
* @returns {boolean} True if the key exists and is not a placeholder, false otherwise.
*/
function getMcpApiKeyStatus(providerName) {
const rootDir = findProjectRoot(); // Use existing root finding
if (!rootDir) {
console.warn(
chalk.yellow('Warning: Could not find project root to check mcp.json.')
);
return false; // Cannot check without root
}
const mcpConfigPath = path.join(rootDir, '.cursor', 'mcp.json');
if (!fs.existsSync(mcpConfigPath)) {
// console.warn(chalk.yellow('Warning: .cursor/mcp.json not found.'));
return false; // File doesn't exist
}
try {
const mcpConfigRaw = fs.readFileSync(mcpConfigPath, 'utf-8');
const mcpConfig = JSON.parse(mcpConfigRaw);
const mcpEnv = mcpConfig?.mcpServers?.['taskmaster-ai']?.env;
if (!mcpEnv) {
// console.warn(chalk.yellow('Warning: Could not find taskmaster-ai env in mcp.json.'));
return false; // Structure missing
}
let apiKeyToCheck = null;
let placeholderValue = null;
switch (providerName) {
case 'anthropic':
apiKeyToCheck = mcpEnv.ANTHROPIC_API_KEY;
placeholderValue = 'YOUR_ANTHROPIC_API_KEY_HERE';
break;
case 'openai':
case 'openrouter':
apiKeyToCheck = mcpEnv.OPENAI_API_KEY;
placeholderValue = 'YOUR_OPENAI_API_KEY_HERE'; // Assuming placeholder matches OPENAI
break;
case 'google':
apiKeyToCheck = mcpEnv.GOOGLE_API_KEY;
placeholderValue = 'YOUR_GOOGLE_API_KEY_HERE';
break;
case 'perplexity':
apiKeyToCheck = mcpEnv.PERPLEXITY_API_KEY;
placeholderValue = 'YOUR_PERPLEXITY_API_KEY_HERE';
break;
case 'grok':
case 'xai':
apiKeyToCheck = mcpEnv.GROK_API_KEY;
placeholderValue = 'YOUR_GROK_API_KEY_HERE';
break;
case 'ollama':
return true; // No key needed
default:
return false; // Unknown provider
}
return !!apiKeyToCheck && apiKeyToCheck !== placeholderValue;
} catch (error) {
console.error(
chalk.red(`Error reading or parsing .cursor/mcp.json: ${error.message}`)
);
return false;
}
}
@@ -355,8 +695,14 @@ export {
getMainModelId,
getResearchProvider,
getResearchModelId,
+getFallbackProvider,
+getFallbackModelId,
setMainModel,
setResearchModel,
+setFallbackModel,
VALID_PROVIDERS,
-MODEL_MAP
+MODEL_MAP,
+getAvailableModels,
+hasApiKeyForProvider,
+getMcpApiKeyStatus
};
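Taken together, the exported surface can be exercised like this (a sketch; `getAvailableModels()` entries carry `id`, `provider`, `swe_score`, `cost_per_1m_tokens`, and `allowed_roles`, and the setters return booleans):

```javascript
import {
	getMainProvider,
	getMainModelId,
	getFallbackModelId,
	setFallbackModel,
	getAvailableModels
} from './config-manager.js';

// Read the active configuration
console.log(`Main: ${getMainProvider()} / ${getMainModelId()}`);
console.log(`Fallback: ${getFallbackModelId() ?? '(none set)'}`);

// Pick the first model allowed for the fallback role and persist it
const candidate = getAvailableModels().find((m) =>
	m.allowed_roles?.includes('fallback')
);
if (candidate && setFallbackModel(candidate.provider, candidate.id)) {
	console.log(`Fallback set to ${candidate.provider} / ${candidate.id}`);
}
```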


@@ -0,0 +1,256 @@
{
"anthropic": [
{
"id": "claude-3.5-sonnet-20240620",
"swe_score": 0.49,
"cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "claude-3-7-sonnet-20250219",
"swe_score": 0.623,
"cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "claude-3.5-haiku-20241022",
"swe_score": 0.406,
"cost_per_1m_tokens": { "input": 0.8, "output": 4.0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "claude-3-haiku-20240307",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.25, "output": 1.25 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "claude-3-opus-20240229",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
}
],
"openai": [
{
"id": "gpt-4o",
"swe_score": 0.332,
"cost_per_1m_tokens": { "input": 5.0, "output": 15.0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "gpt-4-turbo",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 10.0, "output": 30.0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "o1",
"swe_score": 0.489,
"cost_per_1m_tokens": { "input": 15.0, "output": 60.0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "o3-mini",
"swe_score": 0.493,
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "o1-pro",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 150.0, "output": 600.0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "gpt-4.1",
"swe_score": 0.55,
"cost_per_1m_tokens": { "input": 2.0, "output": 8.0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "gpt-4.5-preview",
"swe_score": 0.38,
"cost_per_1m_tokens": { "input": 75.0, "output": 150.0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "gpt-4.1-mini",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.4, "output": 1.6 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "gpt-4.1-nano",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.1, "output": 0.4 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "gpt-3.5-turbo",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.5, "output": 1.5 },
"allowed_roles": ["main", "fallback"]
}
],
"google": [
{
"id": "gemini-2.5-pro-latest",
"swe_score": 0.638,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "gemini-1.5-flash-latest",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "gemini-2.0-flash-experimental",
"swe_score": 0.754,
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "gemini-2.0-flash-thinking-experimental",
"swe_score": 0.754,
"cost_per_1m_tokens": { "input": 0.15, "output": 0.6 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "gemini-2.0-pro",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "gemma-3-7b",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
}
],
"perplexity": [
{
"id": "sonar-pro",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback", "research"]
},
{
"id": "sonar-mini",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback", "research"]
},
{
"id": "deep-research",
"swe_score": 0.211,
"cost_per_1m_tokens": { "input": 2.0, "output": 8.0 },
"allowed_roles": ["main", "fallback", "research"]
}
],
"ollama": [
{
"id": "llava",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "deepseek-coder-v2",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "dolphin3",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "olmo2-7b",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "olmo2-13b",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
}
],
"openrouter": [
{
"id": "meta-llama/llama-4-scout",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "google/gemini-2.5-pro-exp-03-25",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "openrouter/optimus-alpha",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 30.0, "output": 60.0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "openrouter/quasar-alpha",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "kimi-vl-a3b-thinking",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "qwen2.5-max",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
}
],
"grok": [
{
"id": "grok3-beta",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback", "research"]
},
{
"id": "grok-3-mini",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "grok-2",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "grok-2-mini",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
},
{
"id": "grok-1.5",
"swe_score": 0,
"cost_per_1m_tokens": null,
"allowed_roles": ["main", "fallback"]
}
]
}
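Each entry's `allowed_roles` drives the role validation in the setters above. A condensed sketch of that lookup (the file path here is illustrative; the real module resolves `supported-models.json` relative to its own `__dirname` and loads it once into `MODEL_MAP`):

```javascript
import fs from 'fs';

// Condensed version of the check in setMainModel/setResearchModel/setFallbackModel.
const MODEL_MAP = JSON.parse(
	fs.readFileSync('supported-models.json', 'utf-8') // path illustrative
);

function isAllowedForRole(provider, modelId, role) {
	const models = MODEL_MAP[provider] ?? [];
	const model = models.find((m) => m.id === modelId);
	return Boolean(model?.allowed_roles?.includes(role));
}

isAllowedForRole('perplexity', 'deep-research', 'research'); // true
isAllowedForRole('openai', 'gpt-4o', 'research'); // false: only main/fallback
```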


@@ -1,6 +1,6 @@
# Task ID: 61
# Title: Implement Flexible AI Model Management
-# Status: pending
+# Status: in-progress
# Dependencies: None
# Priority: high
# Description: Currently, Task Master only supports Claude for main operations and Perplexity for research. Users are limited in flexibility when managing AI models. Adding comprehensive support for multiple popular AI models (OpenAI, Ollama, Gemini, OpenRouter, Grok) and providing intuitive CLI commands for model management will significantly enhance usability, transparency, and adaptability to user preferences and project-specific needs. This task will now leverage Vercel's AI SDK to streamline integration and management of these models.
@@ -142,7 +142,7 @@ export function getClient(model) {
- Test compatibility with serverless and edge deployments.
# Subtasks:
-## 1. Create Configuration Management Module [in-progress]
+## 1. Create Configuration Management Module [done]
### Dependencies: None
### Description: Develop a centralized configuration module to manage AI model settings and preferences, leveraging the Strategy pattern for model selection.
### Details:
@@ -428,7 +428,7 @@ describe('AI Client Factory', () => {
7. Add support for optional configuration parameters for each model
8. Testing approach: Create tests that verify environment variable validation logic
-## 6. Implement Model Listing Command [pending]
+## 6. Implement Model Listing Command [done]
### Dependencies: 61.1, 61.2, 61.4
### Description: Implement the 'task-master models' command to display currently configured models and available options.
### Details:
@@ -441,7 +441,7 @@ describe('AI Client Factory', () => {
7. Add support for verbose output with additional details
8. Testing approach: Create integration tests that verify correct output formatting and content
-## 7. Implement Model Setting Commands [pending]
+## 7. Implement Model Setting Commands [done]
### Dependencies: 61.1, 61.2, 61.4, 61.6
### Description: Implement the commands to set main and research models with proper validation and feedback.
### Details:


@@ -2743,7 +2743,7 @@
"description": "Currently, Task Master only supports Claude for main operations and Perplexity for research. Users are limited in flexibility when managing AI models. Adding comprehensive support for multiple popular AI models (OpenAI, Ollama, Gemini, OpenRouter, Grok) and providing intuitive CLI commands for model management will significantly enhance usability, transparency, and adaptability to user preferences and project-specific needs. This task will now leverage Vercel's AI SDK to streamline integration and management of these models.", "description": "Currently, Task Master only supports Claude for main operations and Perplexity for research. Users are limited in flexibility when managing AI models. Adding comprehensive support for multiple popular AI models (OpenAI, Ollama, Gemini, OpenRouter, Grok) and providing intuitive CLI commands for model management will significantly enhance usability, transparency, and adaptability to user preferences and project-specific needs. This task will now leverage Vercel's AI SDK to streamline integration and management of these models.",
"details": "### Proposed Solution\nImplement an intuitive CLI command for AI model management, leveraging Vercel's AI SDK for seamless integration:\n\n- `task-master models`: Lists currently configured models for main operations and research.\n- `task-master models --set-main=\"<model_name>\" --set-research=\"<model_name>\"`: Sets the desired models for main operations and research tasks respectively.\n\nSupported AI Models:\n- **Main Operations:** Claude (current default), OpenAI, Ollama, Gemini, OpenRouter\n- **Research Operations:** Perplexity (current default), OpenAI, Ollama, Grok\n\nIf a user specifies an invalid model, the CLI lists available models clearly.\n\n### Example CLI Usage\n\nList current models:\n```shell\ntask-master models\n```\nOutput example:\n```\nCurrent AI Model Configuration:\n- Main Operations: Claude\n- Research Operations: Perplexity\n```\n\nSet new models:\n```shell\ntask-master models --set-main=\"gemini\" --set-research=\"grok\"\n```\n\nAttempt invalid model:\n```shell\ntask-master models --set-main=\"invalidModel\"\n```\nOutput example:\n```\nError: \"invalidModel\" is not a valid model.\n\nAvailable models for Main Operations:\n- claude\n- openai\n- ollama\n- gemini\n- openrouter\n```\n\n### High-Level Workflow\n1. Update CLI parsing logic to handle new `models` command and associated flags.\n2. Consolidate all AI calls into `ai-services.js` for centralized management.\n3. Utilize Vercel's AI SDK to implement robust wrapper functions for each AI API:\n - Claude (existing)\n - Perplexity (existing)\n - OpenAI\n - Ollama\n - Gemini\n - OpenRouter\n - Grok\n4. Update environment variables and provide clear documentation in `.env_example`:\n```env\n# MAIN_MODEL options: claude, openai, ollama, gemini, openrouter\nMAIN_MODEL=claude\n\n# RESEARCH_MODEL options: perplexity, openai, ollama, grok\nRESEARCH_MODEL=perplexity\n```\n5. Ensure dynamic model switching via environment variables or configuration management.\n6. 
Provide clear CLI feedback and validation of model names.\n\n### Vercel AI SDK Integration\n- Use Vercel's AI SDK to abstract API calls for supported models, ensuring consistent error handling and response formatting.\n- Implement a configuration layer to map model names to their respective Vercel SDK integrations.\n- Example pattern for integration:\n```javascript\nimport { createClient } from '@vercel/ai';\n\nconst clients = {\n claude: createClient({ provider: 'anthropic', apiKey: process.env.ANTHROPIC_API_KEY }),\n openai: createClient({ provider: 'openai', apiKey: process.env.OPENAI_API_KEY }),\n ollama: createClient({ provider: 'ollama', apiKey: process.env.OLLAMA_API_KEY }),\n gemini: createClient({ provider: 'gemini', apiKey: process.env.GEMINI_API_KEY }),\n openrouter: createClient({ provider: 'openrouter', apiKey: process.env.OPENROUTER_API_KEY }),\n perplexity: createClient({ provider: 'perplexity', apiKey: process.env.PERPLEXITY_API_KEY }),\n grok: createClient({ provider: 'grok', apiKey: process.env.GROK_API_KEY })\n};\n\nexport function getClient(model) {\n if (!clients[model]) {\n throw new Error(`Invalid model: ${model}`);\n }\n return clients[model];\n}\n```\n- Leverage `generateText` and `streamText` functions from the SDK for text generation and streaming capabilities.\n- Ensure compatibility with serverless and edge deployments using Vercel's infrastructure.\n\n### Key Elements\n- Enhanced model visibility and intuitive management commands.\n- Centralized and robust handling of AI API integrations via Vercel AI SDK.\n- Clear CLI responses with detailed validation feedback.\n- Flexible, easy-to-understand environment configuration.\n\n### Implementation Considerations\n- Centralize all AI interactions through a single, maintainable module (`ai-services.js`).\n- Ensure comprehensive error handling for invalid model selections.\n- Clearly document environment variable options and their purposes.\n- Validate model names rigorously to prevent runtime errors.\n\n### Out of Scope (Future Considerations)\n- Automatic benchmarking or model performance comparison.\n- Dynamic runtime switching of models based on task type or complexity.", "details": "### Proposed Solution\nImplement an intuitive CLI command for AI model management, leveraging Vercel's AI SDK for seamless integration:\n\n- `task-master models`: Lists currently configured models for main operations and research.\n- `task-master models --set-main=\"<model_name>\" --set-research=\"<model_name>\"`: Sets the desired models for main operations and research tasks respectively.\n\nSupported AI Models:\n- **Main Operations:** Claude (current default), OpenAI, Ollama, Gemini, OpenRouter\n- **Research Operations:** Perplexity (current default), OpenAI, Ollama, Grok\n\nIf a user specifies an invalid model, the CLI lists available models clearly.\n\n### Example CLI Usage\n\nList current models:\n```shell\ntask-master models\n```\nOutput example:\n```\nCurrent AI Model Configuration:\n- Main Operations: Claude\n- Research Operations: Perplexity\n```\n\nSet new models:\n```shell\ntask-master models --set-main=\"gemini\" --set-research=\"grok\"\n```\n\nAttempt invalid model:\n```shell\ntask-master models --set-main=\"invalidModel\"\n```\nOutput example:\n```\nError: \"invalidModel\" is not a valid model.\n\nAvailable models for Main Operations:\n- claude\n- openai\n- ollama\n- gemini\n- openrouter\n```\n\n### High-Level Workflow\n1. Update CLI parsing logic to handle new `models` command and associated flags.\n2. 
Consolidate all AI calls into `ai-services.js` for centralized management.\n3. Utilize Vercel's AI SDK to implement robust wrapper functions for each AI API:\n - Claude (existing)\n - Perplexity (existing)\n - OpenAI\n - Ollama\n - Gemini\n - OpenRouter\n - Grok\n4. Update environment variables and provide clear documentation in `.env_example`:\n```env\n# MAIN_MODEL options: claude, openai, ollama, gemini, openrouter\nMAIN_MODEL=claude\n\n# RESEARCH_MODEL options: perplexity, openai, ollama, grok\nRESEARCH_MODEL=perplexity\n```\n5. Ensure dynamic model switching via environment variables or configuration management.\n6. Provide clear CLI feedback and validation of model names.\n\n### Vercel AI SDK Integration\n- Use Vercel's AI SDK to abstract API calls for supported models, ensuring consistent error handling and response formatting.\n- Implement a configuration layer to map model names to their respective Vercel SDK integrations.\n- Example pattern for integration:\n```javascript\nimport { createClient } from '@vercel/ai';\n\nconst clients = {\n claude: createClient({ provider: 'anthropic', apiKey: process.env.ANTHROPIC_API_KEY }),\n openai: createClient({ provider: 'openai', apiKey: process.env.OPENAI_API_KEY }),\n ollama: createClient({ provider: 'ollama', apiKey: process.env.OLLAMA_API_KEY }),\n gemini: createClient({ provider: 'gemini', apiKey: process.env.GEMINI_API_KEY }),\n openrouter: createClient({ provider: 'openrouter', apiKey: process.env.OPENROUTER_API_KEY }),\n perplexity: createClient({ provider: 'perplexity', apiKey: process.env.PERPLEXITY_API_KEY }),\n grok: createClient({ provider: 'grok', apiKey: process.env.GROK_API_KEY })\n};\n\nexport function getClient(model) {\n if (!clients[model]) {\n throw new Error(`Invalid model: ${model}`);\n }\n return clients[model];\n}\n```\n- Leverage `generateText` and `streamText` functions from the SDK for text generation and streaming capabilities.\n- Ensure compatibility with serverless and edge deployments using Vercel's infrastructure.\n\n### Key Elements\n- Enhanced model visibility and intuitive management commands.\n- Centralized and robust handling of AI API integrations via Vercel AI SDK.\n- Clear CLI responses with detailed validation feedback.\n- Flexible, easy-to-understand environment configuration.\n\n### Implementation Considerations\n- Centralize all AI interactions through a single, maintainable module (`ai-services.js`).\n- Ensure comprehensive error handling for invalid model selections.\n- Clearly document environment variable options and their purposes.\n- Validate model names rigorously to prevent runtime errors.\n\n### Out of Scope (Future Considerations)\n- Automatic benchmarking or model performance comparison.\n- Dynamic runtime switching of models based on task type or complexity.",
"testStrategy": "### Test Strategy\n1. **Unit Tests**:\n - Test CLI commands for listing, setting, and validating models.\n - Mock Vercel AI SDK calls to ensure proper integration and error handling.\n\n2. **Integration Tests**:\n - Validate end-to-end functionality of model management commands.\n - Test dynamic switching of models via environment variables.\n\n3. **Error Handling Tests**:\n - Simulate invalid model names and verify error messages.\n - Test API failures for each model provider and ensure graceful degradation.\n\n4. **Documentation Validation**:\n - Verify that `.env_example` and CLI usage examples are accurate and comprehensive.\n\n5. **Performance Tests**:\n - Measure response times for API calls through Vercel AI SDK.\n - Ensure no significant latency is introduced by model switching.\n\n6. **SDK-Specific Tests**:\n - Validate the behavior of `generateText` and `streamText` functions for supported models.\n - Test compatibility with serverless and edge deployments.", "testStrategy": "### Test Strategy\n1. **Unit Tests**:\n - Test CLI commands for listing, setting, and validating models.\n - Mock Vercel AI SDK calls to ensure proper integration and error handling.\n\n2. **Integration Tests**:\n - Validate end-to-end functionality of model management commands.\n - Test dynamic switching of models via environment variables.\n\n3. **Error Handling Tests**:\n - Simulate invalid model names and verify error messages.\n - Test API failures for each model provider and ensure graceful degradation.\n\n4. **Documentation Validation**:\n - Verify that `.env_example` and CLI usage examples are accurate and comprehensive.\n\n5. **Performance Tests**:\n - Measure response times for API calls through Vercel AI SDK.\n - Ensure no significant latency is introduced by model switching.\n\n6. **SDK-Specific Tests**:\n - Validate the behavior of `generateText` and `streamText` functions for supported models.\n - Test compatibility with serverless and edge deployments.",
"status": "pending", "status": "in-progress",
"dependencies": [], "dependencies": [],
"priority": "high", "priority": "high",
"subtasks": [ "subtasks": [
@@ -2753,7 +2753,7 @@
"description": "Develop a centralized configuration module to manage AI model settings and preferences, leveraging the Strategy pattern for model selection.", "description": "Develop a centralized configuration module to manage AI model settings and preferences, leveraging the Strategy pattern for model selection.",
"dependencies": [], "dependencies": [],
"details": "1. Create a new `config-manager.js` module to handle model configuration\n2. Implement functions to read/write model preferences to a local config file\n3. Define model validation logic with clear error messages\n4. Create mapping of valid models for main and research operations\n5. Implement getters and setters for model configuration\n6. Add utility functions to validate model names against available options\n7. Include default fallback models\n8. Testing approach: Write unit tests to verify config reading/writing and model validation logic\n\n<info added on 2025-04-14T21:54:28.887Z>\nHere's the additional information to add:\n\n```\nThe configuration management module should:\n\n1. Use a `.taskmasterconfig` JSON file in the project root directory to store model settings\n2. Structure the config file with two main keys: `main` and `research` for respective model selections\n3. Implement functions to locate the project root directory (using package.json as reference)\n4. Define constants for valid models:\n ```javascript\n const VALID_MAIN_MODELS = ['gpt-4', 'gpt-3.5-turbo', 'gpt-4-turbo'];\n const VALID_RESEARCH_MODELS = ['gpt-4', 'gpt-4-turbo', 'claude-2'];\n const DEFAULT_MAIN_MODEL = 'gpt-3.5-turbo';\n const DEFAULT_RESEARCH_MODEL = 'gpt-4';\n ```\n5. Implement model getters with priority order:\n - First check `.taskmasterconfig` file\n - Fall back to environment variables if config file missing/invalid\n - Use defaults as last resort\n6. Implement model setters that validate input against valid model lists before updating config\n7. Keep API key management in `ai-services.js` using environment variables (don't store keys in config file)\n8. Add helper functions for config file operations:\n ```javascript\n function getConfigPath() { /* locate .taskmasterconfig */ }\n function readConfig() { /* read and parse config file */ }\n function writeConfig(config) { /* stringify and write config */ }\n ```\n9. Include error handling for file operations and invalid configurations\n```\n</info added on 2025-04-14T21:54:28.887Z>\n\n<info added on 2025-04-14T22:52:29.551Z>\n```\nThe configuration management module should be updated to:\n\n1. Separate model configuration into provider and modelId components:\n ```javascript\n // Example config structure\n {\n \"models\": {\n \"main\": {\n \"provider\": \"openai\",\n \"modelId\": \"gpt-3.5-turbo\"\n },\n \"research\": {\n \"provider\": \"openai\",\n \"modelId\": \"gpt-4\"\n }\n }\n }\n ```\n\n2. Define provider constants:\n ```javascript\n const VALID_MAIN_PROVIDERS = ['openai', 'anthropic', 'local'];\n const VALID_RESEARCH_PROVIDERS = ['openai', 'anthropic', 'cohere'];\n const DEFAULT_MAIN_PROVIDER = 'openai';\n const DEFAULT_RESEARCH_PROVIDER = 'openai';\n ```\n\n3. Implement optional MODEL_MAP for validation:\n ```javascript\n const MODEL_MAP = {\n 'openai': ['gpt-3.5-turbo', 'gpt-4', 'gpt-4-turbo'],\n 'anthropic': ['claude-2', 'claude-instant'],\n 'cohere': ['command', 'command-light'],\n 'local': ['llama2', 'mistral']\n };\n ```\n\n4. Update getter functions to handle provider/modelId separation:\n ```javascript\n function getMainProvider() { /* return provider with fallbacks */ }\n function getMainModelId() { /* return modelId with fallbacks */ }\n function getResearchProvider() { /* return provider with fallbacks */ }\n function getResearchModelId() { /* return modelId with fallbacks */ }\n ```\n\n5. 
Update setter functions to validate both provider and modelId:\n ```javascript\n function setMainModel(provider, modelId) {\n // Validate provider is in VALID_MAIN_PROVIDERS\n // Optionally validate modelId is valid for provider using MODEL_MAP\n // Update config file with new values\n }\n ```\n\n6. Add utility functions for provider-specific validation:\n ```javascript\n function isValidProviderModelCombination(provider, modelId) {\n return MODEL_MAP[provider]?.includes(modelId) || false;\n }\n ```\n\n7. Extend unit tests to cover provider/modelId separation, including:\n - Testing provider validation\n - Testing provider-modelId combination validation\n - Verifying getters return correct provider and modelId values\n - Confirming setters properly validate and store both components\n```\n</info added on 2025-04-14T22:52:29.551Z>", "details": "1. Create a new `config-manager.js` module to handle model configuration\n2. Implement functions to read/write model preferences to a local config file\n3. Define model validation logic with clear error messages\n4. Create mapping of valid models for main and research operations\n5. Implement getters and setters for model configuration\n6. Add utility functions to validate model names against available options\n7. Include default fallback models\n8. Testing approach: Write unit tests to verify config reading/writing and model validation logic\n\n<info added on 2025-04-14T21:54:28.887Z>\nHere's the additional information to add:\n\n```\nThe configuration management module should:\n\n1. Use a `.taskmasterconfig` JSON file in the project root directory to store model settings\n2. Structure the config file with two main keys: `main` and `research` for respective model selections\n3. Implement functions to locate the project root directory (using package.json as reference)\n4. Define constants for valid models:\n ```javascript\n const VALID_MAIN_MODELS = ['gpt-4', 'gpt-3.5-turbo', 'gpt-4-turbo'];\n const VALID_RESEARCH_MODELS = ['gpt-4', 'gpt-4-turbo', 'claude-2'];\n const DEFAULT_MAIN_MODEL = 'gpt-3.5-turbo';\n const DEFAULT_RESEARCH_MODEL = 'gpt-4';\n ```\n5. Implement model getters with priority order:\n - First check `.taskmasterconfig` file\n - Fall back to environment variables if config file missing/invalid\n - Use defaults as last resort\n6. Implement model setters that validate input against valid model lists before updating config\n7. Keep API key management in `ai-services.js` using environment variables (don't store keys in config file)\n8. Add helper functions for config file operations:\n ```javascript\n function getConfigPath() { /* locate .taskmasterconfig */ }\n function readConfig() { /* read and parse config file */ }\n function writeConfig(config) { /* stringify and write config */ }\n ```\n9. Include error handling for file operations and invalid configurations\n```\n</info added on 2025-04-14T21:54:28.887Z>\n\n<info added on 2025-04-14T22:52:29.551Z>\n```\nThe configuration management module should be updated to:\n\n1. Separate model configuration into provider and modelId components:\n ```javascript\n // Example config structure\n {\n \"models\": {\n \"main\": {\n \"provider\": \"openai\",\n \"modelId\": \"gpt-3.5-turbo\"\n },\n \"research\": {\n \"provider\": \"openai\",\n \"modelId\": \"gpt-4\"\n }\n }\n }\n ```\n\n2. 
Define provider constants:\n ```javascript\n const VALID_MAIN_PROVIDERS = ['openai', 'anthropic', 'local'];\n const VALID_RESEARCH_PROVIDERS = ['openai', 'anthropic', 'cohere'];\n const DEFAULT_MAIN_PROVIDER = 'openai';\n const DEFAULT_RESEARCH_PROVIDER = 'openai';\n ```\n\n3. Implement optional MODEL_MAP for validation:\n ```javascript\n const MODEL_MAP = {\n 'openai': ['gpt-3.5-turbo', 'gpt-4', 'gpt-4-turbo'],\n 'anthropic': ['claude-2', 'claude-instant'],\n 'cohere': ['command', 'command-light'],\n 'local': ['llama2', 'mistral']\n };\n ```\n\n4. Update getter functions to handle provider/modelId separation:\n ```javascript\n function getMainProvider() { /* return provider with fallbacks */ }\n function getMainModelId() { /* return modelId with fallbacks */ }\n function getResearchProvider() { /* return provider with fallbacks */ }\n function getResearchModelId() { /* return modelId with fallbacks */ }\n ```\n\n5. Update setter functions to validate both provider and modelId:\n ```javascript\n function setMainModel(provider, modelId) {\n // Validate provider is in VALID_MAIN_PROVIDERS\n // Optionally validate modelId is valid for provider using MODEL_MAP\n // Update config file with new values\n }\n ```\n\n6. Add utility functions for provider-specific validation:\n ```javascript\n function isValidProviderModelCombination(provider, modelId) {\n return MODEL_MAP[provider]?.includes(modelId) || false;\n }\n ```\n\n7. Extend unit tests to cover provider/modelId separation, including:\n - Testing provider validation\n - Testing provider-modelId combination validation\n - Verifying getters return correct provider and modelId values\n - Confirming setters properly validate and store both components\n```\n</info added on 2025-04-14T22:52:29.551Z>",
"status": "in-progress", "status": "done",
"parentTaskId": 61 "parentTaskId": 61
}, },
{ {
@@ -2811,7 +2811,7 @@
4 4
], ],
"details": "1. Create handler for the models command without flags\n2. Implement formatted output showing current model configuration\n3. Add color-coding for better readability using a library like chalk\n4. Include version information for each configured model\n5. Show API status indicators (connected/disconnected)\n6. Display usage examples for changing models\n7. Add support for verbose output with additional details\n8. Testing approach: Create integration tests that verify correct output formatting and content", "details": "1. Create handler for the models command without flags\n2. Implement formatted output showing current model configuration\n3. Add color-coding for better readability using a library like chalk\n4. Include version information for each configured model\n5. Show API status indicators (connected/disconnected)\n6. Display usage examples for changing models\n7. Add support for verbose output with additional details\n8. Testing approach: Create integration tests that verify correct output formatting and content",
"status": "pending", "status": "done",
"parentTaskId": 61 "parentTaskId": 61
}, },
{ {
@@ -2825,7 +2825,7 @@
6 6
], ],
"details": "1. Create handlers for '--set-main' and '--set-research' flags\n2. Implement validation logic for model names\n3. Add clear error messages for invalid model selections\n4. Implement confirmation messages for successful model changes\n5. Add support for setting both models in a single command\n6. Implement dry-run option to validate without making changes\n7. Add verbose output option for debugging\n8. Testing approach: Create integration tests that verify model setting functionality with various inputs", "details": "1. Create handlers for '--set-main' and '--set-research' flags\n2. Implement validation logic for model names\n3. Add clear error messages for invalid model selections\n4. Implement confirmation messages for successful model changes\n5. Add support for setting both models in a single command\n6. Implement dry-run option to validate without making changes\n7. Add verbose output option for debugging\n8. Testing approach: Create integration tests that verify model setting functionality with various inputs",
"status": "pending", "status": "done",
"parentTaskId": 61 "parentTaskId": 61
}, },
{ {

tests/fixtures/.taskmasterconfig vendored Normal file

@@ -0,0 +1,16 @@
{
"models": {
"main": {
"provider": "openai",
"modelId": "gpt-4o"
},
"research": {
"provider": "perplexity",
"modelId": "sonar-pro"
},
"fallback": {
"provider": "anthropic",
"modelId": "claude-3-haiku-20240307"
}
}
}
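For context, a getter reading this fixture would follow the priority order described in the subtask details above: the `.taskmasterconfig` file first, then an environment variable, then a hard-coded default. A minimal sketch of that lookup — the `MAIN_MODEL` variable and the literal default are illustrative assumptions, not the shipped config-manager code:

import fs from 'fs';
import path from 'path';

// Sketch only: resolve the main model with config-file -> env -> default priority.
function getMainModelId(projectRoot = process.cwd()) {
  try {
    const raw = fs.readFileSync(
      path.join(projectRoot, '.taskmasterconfig'),
      'utf-8'
    );
    const modelId = JSON.parse(raw)?.models?.main?.modelId;
    if (modelId) return modelId;
  } catch {
    // Missing or unparsable config file: fall through to env/default.
  }
  return process.env.MAIN_MODEL || 'gpt-4o';
}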


@@ -0,0 +1,350 @@
import { jest } from '@jest/globals';
// --- Define mock functions ---
const mockGetMainModelId = jest.fn().mockReturnValue('claude-3-opus');
const mockGetResearchModelId = jest.fn().mockReturnValue('gpt-4-turbo');
const mockGetFallbackModelId = jest.fn().mockReturnValue('claude-3-haiku');
const mockSetMainModel = jest.fn().mockResolvedValue(true);
const mockSetResearchModel = jest.fn().mockResolvedValue(true);
const mockSetFallbackModel = jest.fn().mockResolvedValue(true);
const mockGetAvailableModels = jest.fn().mockReturnValue([
{ id: 'claude-3-opus', name: 'Claude 3 Opus', provider: 'anthropic' },
{ id: 'gpt-4-turbo', name: 'GPT-4 Turbo', provider: 'openai' },
{ id: 'claude-3-haiku', name: 'Claude 3 Haiku', provider: 'anthropic' },
{ id: 'claude-3-sonnet', name: 'Claude 3 Sonnet', provider: 'anthropic' }
]);
// Mock UI related functions
const mockDisplayHelp = jest.fn();
const mockDisplayBanner = jest.fn();
const mockLog = jest.fn();
const mockStartLoadingIndicator = jest.fn(() => ({ stop: jest.fn() }));
const mockStopLoadingIndicator = jest.fn();
// --- Setup mocks using unstable_mockModule (recommended for ES modules) ---
jest.unstable_mockModule('../../../scripts/modules/config-manager.js', () => ({
getMainModelId: mockGetMainModelId,
getResearchModelId: mockGetResearchModelId,
getFallbackModelId: mockGetFallbackModelId,
setMainModel: mockSetMainModel,
setResearchModel: mockSetResearchModel,
setFallbackModel: mockSetFallbackModel,
getAvailableModels: mockGetAvailableModels,
VALID_PROVIDERS: ['anthropic', 'openai']
}));
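// Note: with native ES modules, jest.mock() cannot be hoisted above static
// imports, so unstable_mockModule plus dynamic import() (in beforeAll below)
// is the pattern that lets these mocks take effect before the modules load.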
jest.unstable_mockModule('../../../scripts/modules/ui.js', () => ({
displayHelp: mockDisplayHelp,
displayBanner: mockDisplayBanner,
log: mockLog,
startLoadingIndicator: mockStartLoadingIndicator,
stopLoadingIndicator: mockStopLoadingIndicator
}));
// --- Mock chalk for consistent output formatting ---
const mockChalk = {
red: jest.fn((text) => text),
yellow: jest.fn((text) => text),
blue: jest.fn((text) => text),
green: jest.fn((text) => text),
gray: jest.fn((text) => text),
dim: jest.fn((text) => text),
bold: {
cyan: jest.fn((text) => text),
white: jest.fn((text) => text),
red: jest.fn((text) => text)
},
cyan: {
bold: jest.fn((text) => text)
},
white: {
bold: jest.fn((text) => text)
}
};
// Default function for chalk itself
mockChalk.default = jest.fn((text) => text);
// Add the methods to the function itself for dual usage
Object.keys(mockChalk).forEach((key) => {
if (key !== 'default') mockChalk.default[key] = mockChalk[key];
});
jest.unstable_mockModule('chalk', () => ({
default: mockChalk.default
}));
// --- Import modules (AFTER mock setup) ---
let configManager, ui, chalk;
describe('CLI Models Command (Action Handler Test)', () => {
// Setup dynamic imports before tests run
beforeAll(async () => {
configManager = await import('../../../scripts/modules/config-manager.js');
ui = await import('../../../scripts/modules/ui.js');
chalk = (await import('chalk')).default;
});
// --- Replicate the action handler logic from commands.js ---
async function modelsAction(options) {
options = options || {}; // Ensure options object exists
const availableModels = configManager.getAvailableModels();
const findProvider = (modelId) => {
const modelInfo = availableModels.find((m) => m.id === modelId);
return modelInfo?.provider;
};
let modelSetAction = false;
try {
if (options.setMain) {
const modelId = options.setMain;
if (typeof modelId !== 'string' || modelId.trim() === '') {
console.error(
chalk.red('Error: --set-main flag requires a valid model ID.')
);
process.exit(1);
}
const provider = findProvider(modelId);
if (!provider) {
console.error(
chalk.red(
`Error: Model ID "${modelId}" not found in available models.`
)
);
process.exit(1);
}
if (await configManager.setMainModel(provider, modelId)) {
console.log(
chalk.green(`Main model set to: ${modelId} (Provider: ${provider})`)
);
modelSetAction = true;
} else {
console.error(chalk.red(`Failed to set main model.`));
process.exit(1);
}
}
if (options.setResearch) {
const modelId = options.setResearch;
if (typeof modelId !== 'string' || modelId.trim() === '') {
console.error(
chalk.red('Error: --set-research flag requires a valid model ID.')
);
process.exit(1);
}
const provider = findProvider(modelId);
if (!provider) {
console.error(
chalk.red(
`Error: Model ID "${modelId}" not found in available models.`
)
);
process.exit(1);
}
if (await configManager.setResearchModel(provider, modelId)) {
console.log(
chalk.green(
`Research model set to: ${modelId} (Provider: ${provider})`
)
);
modelSetAction = true;
} else {
console.error(chalk.red(`Failed to set research model.`));
process.exit(1);
}
}
if (options.setFallback) {
const modelId = options.setFallback;
if (typeof modelId !== 'string' || modelId.trim() === '') {
console.error(
chalk.red('Error: --set-fallback flag requires a valid model ID.')
);
process.exit(1);
}
const provider = findProvider(modelId);
if (!provider) {
console.error(
chalk.red(
`Error: Model ID "${modelId}" not found in available models.`
)
);
process.exit(1);
}
if (await configManager.setFallbackModel(provider, modelId)) {
console.log(
chalk.green(
`Fallback model set to: ${modelId} (Provider: ${provider})`
)
);
modelSetAction = true;
} else {
console.error(chalk.red(`Failed to set fallback model.`));
process.exit(1);
}
}
if (!modelSetAction) {
const currentMain = configManager.getMainModelId();
const currentResearch = configManager.getResearchModelId();
const currentFallback = configManager.getFallbackModelId();
if (!availableModels || availableModels.length === 0) {
console.log(chalk.yellow('No models defined in configuration.'));
return;
}
// Create a mock table for testing - avoid using Table constructor
const mockTableData = [];
availableModels.forEach((model) => {
// Skip bracketed placeholder entries rather than listing them as models
if (model.id.startsWith('[') && model.id.endsWith(']')) return;
mockTableData.push([
model.id,
model.name || 'N/A',
model.provider || 'N/A',
model.id === currentMain ? chalk.green(' ✓') : '',
model.id === currentResearch ? chalk.green(' ✓') : '',
model.id === currentFallback ? chalk.green(' ✓') : ''
]);
});
// In a real implementation, we would use cli-table3, but for testing
// we'll just log 'Mock Table Output'
console.log('Mock Table Output');
}
} catch (error) {
// Use ui.log mock if available, otherwise console.error
(ui.log || console.error)(
`Error processing models command: ${error.message}`,
'error'
);
if (error.stack) {
(ui.log || console.error)(error.stack, 'debug');
}
throw error; // Re-throw for test failure
}
}
// --- End of Action Handler Logic ---
let originalConsoleLog;
let originalConsoleError;
let originalProcessExit;
beforeEach(() => {
// Reset all mocks
jest.clearAllMocks();
// Save original console methods
originalConsoleLog = console.log;
originalConsoleError = console.error;
originalProcessExit = process.exit;
// Mock console and process.exit
console.log = jest.fn();
console.error = jest.fn();
process.exit = jest.fn((code) => {
throw new Error(`process.exit(${code}) called`);
});
});
afterEach(() => {
// Restore original console methods
console.log = originalConsoleLog;
console.error = originalConsoleError;
process.exit = originalProcessExit;
});
// --- Test Cases (Calling modelsAction directly) ---
it('should call setMainModel with correct provider and ID', async () => {
const modelId = 'claude-3-opus';
const expectedProvider = 'anthropic';
await modelsAction({ setMain: modelId });
expect(mockSetMainModel).toHaveBeenCalledWith(expectedProvider, modelId);
expect(console.log).toHaveBeenCalledWith(
expect.stringContaining(`Main model set to: ${modelId}`)
);
expect(console.log).toHaveBeenCalledWith(
expect.stringContaining(`(Provider: ${expectedProvider})`)
);
});
it('should show an error if --set-main model ID is not found', async () => {
await expect(
modelsAction({ setMain: 'non-existent-model' })
).rejects.toThrow(/process.exit/); // Expect exit call
expect(mockSetMainModel).not.toHaveBeenCalled();
expect(console.error).toHaveBeenCalledWith(
expect.stringContaining('Model ID "non-existent-model" not found')
);
});
it('should call setResearchModel with correct provider and ID', async () => {
const modelId = 'gpt-4-turbo';
const expectedProvider = 'openai';
await modelsAction({ setResearch: modelId });
expect(mockSetResearchModel).toHaveBeenCalledWith(
expectedProvider,
modelId
);
expect(console.log).toHaveBeenCalledWith(
expect.stringContaining(`Research model set to: ${modelId}`)
);
expect(console.log).toHaveBeenCalledWith(
expect.stringContaining(`(Provider: ${expectedProvider})`)
);
});
it('should call setFallbackModel with correct provider and ID', async () => {
const modelId = 'claude-3-haiku';
const expectedProvider = 'anthropic';
await modelsAction({ setFallback: modelId });
expect(mockSetFallbackModel).toHaveBeenCalledWith(
expectedProvider,
modelId
);
expect(console.log).toHaveBeenCalledWith(
expect.stringContaining(`Fallback model set to: ${modelId}`)
);
expect(console.log).toHaveBeenCalledWith(
expect.stringContaining(`(Provider: ${expectedProvider})`)
);
});
it('should call all set*Model functions when all flags are used', async () => {
const mainModelId = 'claude-3-opus';
const researchModelId = 'gpt-4-turbo';
const fallbackModelId = 'claude-3-haiku';
const mainProvider = 'anthropic';
const researchProvider = 'openai';
const fallbackProvider = 'anthropic';
await modelsAction({
setMain: mainModelId,
setResearch: researchModelId,
setFallback: fallbackModelId
});
expect(mockSetMainModel).toHaveBeenCalledWith(mainProvider, mainModelId);
expect(mockSetResearchModel).toHaveBeenCalledWith(
researchProvider,
researchModelId
);
expect(mockSetFallbackModel).toHaveBeenCalledWith(
fallbackProvider,
fallbackModelId
);
});
it('should call specific get*ModelId and getAvailableModels and log table when run without flags', async () => {
await modelsAction({}); // Call with empty options
expect(mockGetMainModelId).toHaveBeenCalled();
expect(mockGetResearchModelId).toHaveBeenCalled();
expect(mockGetFallbackModelId).toHaveBeenCalled();
expect(mockGetAvailableModels).toHaveBeenCalled();
expect(console.log).toHaveBeenCalled();
// Verify the mock table placeholder was logged (the real handler would print a cli-table3 table)
expect(console.log).toHaveBeenCalledWith('Mock Table Output');
});
});
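Because this suite depends on jest.unstable_mockModule, it is assumed to run under Jest's native ESM mode (typically invoked with NODE_OPTIONS=--experimental-vm-modules); under the default CommonJS transform these module mocks would not be applied.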


@@ -25,9 +25,9 @@ global.wait = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
if (process.env.SILENCE_CONSOLE === 'true') { if (process.env.SILENCE_CONSOLE === 'true') {
global.console = { global.console = {
...console, ...console,
log: jest.fn(), log: () => {},
info: jest.fn(), info: () => {},
warn: jest.fn(), warn: () => {},
error: jest.fn() error: () => {}
}; };
} }
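Swapping the jest.fn() stubs for bare no-ops presumably keeps the silenced console out of Jest's mock bookkeeping: plain functions record no call history across the run and are untouched by mock-reset helpers, which also signals that nothing should assert against them.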


@@ -59,7 +59,8 @@ const DEFAULT_CONFIG = {
const VALID_CUSTOM_CONFIG = { const VALID_CUSTOM_CONFIG = {
models: { models: {
main: { provider: 'openai', modelId: 'gpt-4o' }, main: { provider: 'openai', modelId: 'gpt-4o' },
research: { provider: 'google', modelId: 'gemini-1.5-pro-latest' } research: { provider: 'google', modelId: 'gemini-1.5-pro-latest' },
fallback: { provider: undefined, modelId: undefined }
} }
}; };
@@ -67,6 +68,7 @@ const PARTIAL_CONFIG = {
models: { models: {
main: { provider: 'openai', modelId: 'gpt-4-turbo' } main: { provider: 'openai', modelId: 'gpt-4-turbo' }
// research missing // research missing
// fallback will be added by readConfig
} }
}; };
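The "fallback will be added by readConfig" comment above anticipates readConfig supplying missing roles. A minimal sketch of that merge, under the assumption that defaults are applied per role rather than per file:

import fs from 'fs';

// Sketch only: merge the parsed config over role-level defaults so a file
// that omits research/fallback still yields complete model settings.
function readConfig(configPath, defaults) {
  let parsed = {};
  try {
    parsed = JSON.parse(fs.readFileSync(configPath, 'utf-8'));
  } catch {
    // Missing or invalid file: use defaults wholesale.
  }
  return { models: { ...defaults.models, ...(parsed.models || {}) } };
}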
@@ -90,9 +92,66 @@ const resetMocks = () => {
mockWriteFileSync.mockReset(); mockWriteFileSync.mockReset();
mockMkdirSync.mockReset(); mockMkdirSync.mockReset();
// Default behaviors // Default behaviors - CRITICAL: Mock supported-models.json read
mockExistsSync.mockReturnValue(true); mockReadFileSync.mockImplementation((filePath) => {
mockReadFileSync.mockReturnValue(JSON.stringify(DEFAULT_CONFIG)); if (filePath.endsWith('supported-models.json')) {
// Return a mock structure including allowed_roles
return JSON.stringify({
openai: [
{
id: 'gpt-4o',
swe_score: 0,
cost_per_1m_tokens: null,
allowed_roles: ['main', 'fallback']
},
{
id: 'gpt-4',
swe_score: 0,
cost_per_1m_tokens: null,
allowed_roles: ['main', 'fallback']
}
],
google: [
{
id: 'gemini-1.5-pro-latest',
swe_score: 0,
cost_per_1m_tokens: null,
allowed_roles: ['main', 'fallback']
}
],
perplexity: [
{
id: 'sonar-pro',
swe_score: 0,
cost_per_1m_tokens: null,
allowed_roles: ['main', 'fallback', 'research']
}
],
anthropic: [
{
id: 'claude-3-opus-20240229',
swe_score: 0,
cost_per_1m_tokens: null,
allowed_roles: ['main', 'fallback']
},
{
id: 'claude-3.5-sonnet-20240620',
swe_score: 0,
cost_per_1m_tokens: null,
allowed_roles: ['main', 'fallback']
}
]
// Add other providers/models as needed for specific tests
});
} else if (filePath === MOCK_CONFIG_PATH) {
// Default for .taskmasterconfig reads
return JSON.stringify(DEFAULT_CONFIG);
}
// Handle other potential reads or throw an error for unexpected paths
throw new Error(`Unexpected readFileSync call in test: ${filePath}`);
});
mockExistsSync.mockReturnValue(true); // Default to file existing
}; };
// Set up module before tests // Set up module before tests
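The allowed_roles field mocked above suggests role-aware validation layered on the provider/model check. A possible shape, assumed rather than taken from config-manager.js:

// Sketch only: check that a provider/model pair may serve a given role
// ('main' | 'research' | 'fallback'), per the supported-models data above.
function isModelAllowedForRole(supportedModels, provider, modelId, role) {
  const entry = (supportedModels[provider] || []).find((m) => m.id === modelId);
  return Boolean(entry && entry.allowed_roles.includes(role));
}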
@@ -253,10 +312,9 @@ describe('readConfig', () => {
// --- writeConfig Tests --- // --- writeConfig Tests ---
describe('writeConfig', () => { describe('writeConfig', () => {
test('should write valid config to file', () => { test('should write valid config to file', () => {
mockExistsSync.mockReturnValue(true);
const success = configManager.writeConfig( const success = configManager.writeConfig(
VALID_CUSTOM_CONFIG, VALID_CUSTOM_CONFIG,
MOCK_PROJECT_ROOT MOCK_CONFIG_PATH
); );
expect(success).toBe(true); expect(success).toBe(true);
expect(mockExistsSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH); expect(mockExistsSync).toHaveBeenCalledWith(MOCK_CONFIG_PATH);
@@ -265,34 +323,29 @@ describe('writeConfig', () => {
JSON.stringify(VALID_CUSTOM_CONFIG, null, 2), JSON.stringify(VALID_CUSTOM_CONFIG, null, 2),
'utf-8' 'utf-8'
); );
expect(console.error).not.toHaveBeenCalled();
}); });
test('should return false and log error if write fails', () => { test('should return false and log error if write fails', () => {
mockExistsSync.mockReturnValue(true);
const writeError = new Error('Disk full');
mockWriteFileSync.mockImplementation(() => { mockWriteFileSync.mockImplementation(() => {
throw writeError; throw new Error('Disk full');
}); });
const success = configManager.writeConfig( const success = configManager.writeConfig(
VALID_CUSTOM_CONFIG, VALID_CUSTOM_CONFIG,
MOCK_PROJECT_ROOT MOCK_CONFIG_PATH
); );
expect(success).toBe(false); expect(success).toBe(false);
expect(console.error).toHaveBeenCalledWith( expect(console.error).toHaveBeenCalledWith(
expect.stringContaining( expect.stringContaining(
'Error writing to /mock/project/.taskmasterconfig: Disk full.' `Error writing configuration to ${MOCK_CONFIG_PATH}: Disk full`
) )
); );
}); });
test('should return false if config file does not exist', () => { test('should return false if config file does not exist', () => {
mockExistsSync.mockReturnValue(false); mockExistsSync.mockReturnValue(false);
const success = configManager.writeConfig( const success = configManager.writeConfig(VALID_CUSTOM_CONFIG);
VALID_CUSTOM_CONFIG,
MOCK_PROJECT_ROOT
);
expect(success).toBe(false); expect(success).toBe(false);
expect(mockWriteFileSync).not.toHaveBeenCalled(); expect(mockWriteFileSync).not.toHaveBeenCalled();