chore: adds a warning when a custom OpenRouter model is a free (:free) model, which suffers from lower rate limits, restricted context, and, worst of all, no access to tool_use.

Eyal Toledano
2025-06-07 18:54:11 -04:00
parent bfd86eb9cc
commit cc04d53720
5 changed files with 574 additions and 523 deletions
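The substance of the change is a small addition to setModel() in the model-management module (full diff below): when a custom OpenRouter model ID ends in ':free', a dedicated warning about free-tier limitations is returned instead of the generic custom-model warning. Condensed from the diff:

// New branch in setModel() once the model is confirmed to exist on OpenRouter
if (modelId.endsWith(":free")) {
  warningMessage = `Warning: OpenRouter free model '${modelId}' selected. Free models have significant limitations including lower context windows, reduced rate limits, and may not support advanced features like tool_use. Consider using the paid version '${modelId.replace(":free", "")}' for full functionality.`;
} else {
  warningMessage = `Warning: Custom OpenRouter model '${modelId}' set. This model is not officially validated by Taskmaster and may not function as expected.`;
}
report("warn", warningMessage);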

View File

@@ -1,8 +1,8 @@
 {
   "models": {
     "main": {
-      "provider": "anthropic",
-      "modelId": "claude-sonnet-4-20250514",
+      "provider": "openrouter",
+      "modelId": "qwen/qwen3-235b-a22b:free",
       "maxTokens": 50000,
       "temperature": 0.2
     },

View File

@@ -0,0 +1,35 @@
# Task ID: 97
# Title: Create Taskmaster Jingle Implementation
# Status: pending
# Dependencies: 95, 57, 3, 2
# Priority: medium
# Description: Develop a musical jingle system for Taskmaster that plays sound effects during key CLI interactions to enhance user experience.
# Details:
This task involves implementing a sound system that plays audio cues during Taskmaster CLI operations. Key implementation steps include:
1. Audio System Integration:
- Research and select appropriate audio library compatible with Node.js CLI applications
- Implement cross-platform audio playback (Windows, macOS, Linux)
- Create sound configuration options in .taskmasterconfig
2. Jingle Design:
- Define sound triggers for key events (task creation, completion, errors, etc.)
- Create or source appropriate sound files (WAV/MP3 format)
- Implement volume control and mute option in settings
3. CLI Integration:
- Add sound playback to core CLI commands (init, create, update, delete)
- Implement optional sound effects toggle via command line flags
- Ensure audio playback doesn't interfere with CLI performance
4. Documentation:
- Update user guide with sound configuration instructions
- Add troubleshooting section for audio playback issues
# Test Strategy:
1. Verify audio plays correctly during each supported CLI operation
2. Test sound configuration options across different platforms
3. Confirm volume control and mute functionality works as expected
4. Validate that audio playback doesn't affect CLI performance
5. Test edge cases (no audio hardware, invalid sound files, etc.)
6. Ensure sound effects can be disabled via configuration and CLI flags
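As a rough illustration of item 1 in the Details above (cross-platform playback that doesn't block the CLI), one option is to shell out to each platform's built-in audio player rather than add a native dependency. The playJingle helper and sound-file argument below are hypothetical and not part of this commit:

// Hypothetical sketch: play a short sound from a Node.js CLI by spawning the
// platform's built-in audio player; failures are ignored so the CLI never breaks.
import { spawn } from 'child_process';

function playJingle(soundFile) {
  const players = {
    darwin: ['afplay', [soundFile]], // macOS ships afplay
    linux: ['aplay', [soundFile]], // ALSA player, commonly available on Linux
    win32: [
      'powershell',
      ['-c', `(New-Object Media.SoundPlayer '${soundFile}').PlaySync()`]
    ]
  };
  const entry = players[process.platform];
  if (!entry) return; // Unsupported platform: stay silent rather than fail
  const [cmd, args] = entry;
  // Detached with ignored stdio so playback never blocks or pollutes CLI output
  spawn(cmd, args, { stdio: 'ignore', detached: true }).on('error', () => {});
}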

View File

@@ -5871,6 +5871,22 @@
"parentTaskId": 96 "parentTaskId": 96
} }
] ]
},
{
"id": 97,
"title": "Create Taskmaster Jingle Implementation",
"description": "Develop a musical jingle system for Taskmaster that plays sound effects during key CLI interactions to enhance user experience.",
"details": "This task involves implementing a sound system that plays audio cues during Taskmaster CLI operations. Key implementation steps include:\n\n1. Audio System Integration:\n - Research and select appropriate audio library compatible with Node.js CLI applications\n - Implement cross-platform audio playback (Windows, macOS, Linux)\n - Create sound configuration options in .taskmasterconfig\n\n2. Jingle Design:\n - Define sound triggers for key events (task creation, completion, errors, etc.)\n - Create or source appropriate sound files (WAV/MP3 format)\n - Implement volume control and mute option in settings\n\n3. CLI Integration:\n - Add sound playback to core CLI commands (init, create, update, delete)\n - Implement optional sound effects toggle via command line flags\n - Ensure audio playback doesn't interfere with CLI performance\n\n4. Documentation:\n - Update user guide with sound configuration instructions\n - Add troubleshooting section for audio playback issues",
"testStrategy": "1. Verify audio plays correctly during each supported CLI operation\n2. Test sound configuration options across different platforms\n3. Confirm volume control and mute functionality works as expected\n4. Validate that audio playback doesn't affect CLI performance\n5. Test edge cases (no audio hardware, invalid sound files, etc.)\n6. Ensure sound effects can be disabled via configuration and CLI flags",
"status": "pending",
"dependencies": [
95,
57,
3,
2
],
"priority": "medium",
"subtasks": []
}
]
}

View File

@@ -388,13 +388,6 @@
"allowed_roles": ["main", "fallback"], "allowed_roles": ["main", "fallback"],
"max_tokens": 24000 "max_tokens": 24000
}, },
{
"id": "qwen/qwen3-235b-a22b:free",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.14, "output": 2 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 24000
},
{ {
"id": "mistralai/mistral-small-3.1-24b-instruct:free", "id": "mistralai/mistral-small-3.1-24b-instruct:free",
"swe_score": 0, "swe_score": 0,

View File

@@ -3,8 +3,8 @@
* Core functionality for managing AI model configurations * Core functionality for managing AI model configurations
*/ */
import https from 'https'; import https from "https";
import http from 'http'; import http from "http";
import { import {
getMainModelId, getMainModelId,
getResearchModelId, getResearchModelId,
@@ -19,10 +19,10 @@ import {
writeConfig, writeConfig,
isConfigFilePresent, isConfigFilePresent,
getAllProviders, getAllProviders,
getBaseUrlForRole getBaseUrlForRole,
} from '../config-manager.js'; } from "../config-manager.js";
import { findConfigPath } from '../../../src/utils/path-utils.js'; import { findConfigPath } from "../../../src/utils/path-utils.js";
import { log } from '../utils.js'; import { log } from "../utils.js";
/** /**
* Fetches the list of models from OpenRouter API. * Fetches the list of models from OpenRouter API.
@@ -31,26 +31,26 @@ import { log } from '../utils.js';
function fetchOpenRouterModels() { function fetchOpenRouterModels() {
return new Promise((resolve) => { return new Promise((resolve) => {
const options = { const options = {
hostname: 'openrouter.ai', hostname: "openrouter.ai",
path: '/api/v1/models', path: "/api/v1/models",
method: 'GET', method: "GET",
headers: { headers: {
Accept: 'application/json' Accept: "application/json",
} },
}; };
const req = https.request(options, (res) => { const req = https.request(options, (res) => {
let data = ''; let data = "";
res.on('data', (chunk) => { res.on("data", (chunk) => {
data += chunk; data += chunk;
}); });
res.on('end', () => { res.on("end", () => {
if (res.statusCode === 200) { if (res.statusCode === 200) {
try { try {
const parsedData = JSON.parse(data); const parsedData = JSON.parse(data);
resolve(parsedData.data || []); // Return the array of models resolve(parsedData.data || []); // Return the array of models
} catch (e) { } catch (e) {
console.error('Error parsing OpenRouter response:', e); console.error("Error parsing OpenRouter response:", e);
resolve(null); // Indicate failure resolve(null); // Indicate failure
} }
} else { } else {
@@ -62,8 +62,8 @@ function fetchOpenRouterModels() {
}); });
}); });
req.on('error', (e) => { req.on("error", (e) => {
console.error('Error fetching OpenRouter models:', e); console.error("Error fetching OpenRouter models:", e);
resolve(null); // Indicate failure resolve(null); // Indicate failure
}); });
req.end(); req.end();
@@ -75,14 +75,14 @@ function fetchOpenRouterModels() {
* @param {string} baseURL - The base URL for the Ollama API (e.g., "http://localhost:11434/api") * @param {string} baseURL - The base URL for the Ollama API (e.g., "http://localhost:11434/api")
* @returns {Promise<Array|null>} A promise that resolves with the list of model objects or null if fetch fails. * @returns {Promise<Array|null>} A promise that resolves with the list of model objects or null if fetch fails.
*/ */
function fetchOllamaModels(baseURL = 'http://localhost:11434/api') { function fetchOllamaModels(baseURL = "http://localhost:11434/api") {
return new Promise((resolve) => { return new Promise((resolve) => {
try { try {
// Parse the base URL to extract hostname, port, and base path // Parse the base URL to extract hostname, port, and base path
const url = new URL(baseURL); const url = new URL(baseURL);
const isHttps = url.protocol === 'https:'; const isHttps = url.protocol === "https:";
const port = url.port || (isHttps ? 443 : 80); const port = url.port || (isHttps ? 443 : 80);
const basePath = url.pathname.endsWith('/') const basePath = url.pathname.endsWith("/")
? url.pathname.slice(0, -1) ? url.pathname.slice(0, -1)
: url.pathname; : url.pathname;
@@ -90,25 +90,25 @@ function fetchOllamaModels(baseURL = 'http://localhost:11434/api') {
hostname: url.hostname, hostname: url.hostname,
port: parseInt(port, 10), port: parseInt(port, 10),
path: `${basePath}/tags`, path: `${basePath}/tags`,
method: 'GET', method: "GET",
headers: { headers: {
Accept: 'application/json' Accept: "application/json",
} },
}; };
const requestLib = isHttps ? https : http; const requestLib = isHttps ? https : http;
const req = requestLib.request(options, (res) => { const req = requestLib.request(options, (res) => {
let data = ''; let data = "";
res.on('data', (chunk) => { res.on("data", (chunk) => {
data += chunk; data += chunk;
}); });
res.on('end', () => { res.on("end", () => {
if (res.statusCode === 200) { if (res.statusCode === 200) {
try { try {
const parsedData = JSON.parse(data); const parsedData = JSON.parse(data);
resolve(parsedData.models || []); // Return the array of models resolve(parsedData.models || []); // Return the array of models
} catch (e) { } catch (e) {
console.error('Error parsing Ollama response:', e); console.error("Error parsing Ollama response:", e);
resolve(null); // Indicate failure resolve(null); // Indicate failure
} }
} else { } else {
@@ -120,13 +120,13 @@ function fetchOllamaModels(baseURL = 'http://localhost:11434/api') {
}); });
}); });
req.on('error', (e) => { req.on("error", (e) => {
console.error('Error fetching Ollama models:', e); console.error("Error fetching Ollama models:", e);
resolve(null); // Indicate failure resolve(null); // Indicate failure
}); });
req.end(); req.end();
} catch (e) { } catch (e) {
console.error('Error parsing Ollama base URL:', e); console.error("Error parsing Ollama base URL:", e);
resolve(null); // Indicate failure resolve(null); // Indicate failure
} }
}); });
@@ -144,13 +144,13 @@ async function getModelConfiguration(options = {}) {
const { mcpLog, projectRoot, session } = options; const { mcpLog, projectRoot, session } = options;
const report = (level, ...args) => { const report = (level, ...args) => {
if (mcpLog && typeof mcpLog[level] === 'function') { if (mcpLog && typeof mcpLog[level] === "function") {
mcpLog[level](...args); mcpLog[level](...args);
} }
}; };
if (!projectRoot) { if (!projectRoot) {
throw new Error('Project root is required but not found.'); throw new Error("Project root is required but not found.");
} }
// Use centralized config path finding instead of hardcoded path // Use centralized config path finding instead of hardcoded path
@@ -158,11 +158,11 @@ async function getModelConfiguration(options = {}) {
const configExists = isConfigFilePresent(projectRoot); const configExists = isConfigFilePresent(projectRoot);
log( log(
'debug', "debug",
`Checking for config file using findConfigPath, found: ${configPath}` `Checking for config file using findConfigPath, found: ${configPath}`
); );
log( log(
'debug', "debug",
`Checking config file using isConfigFilePresent(), exists: ${configExists}` `Checking config file using isConfigFilePresent(), exists: ${configExists}`
); );
@@ -221,8 +221,8 @@ async function getModelConfiguration(options = {}) {
cost: mainModelData?.cost_per_1m_tokens || null, cost: mainModelData?.cost_per_1m_tokens || null,
keyStatus: { keyStatus: {
cli: mainCliKeyOk, cli: mainCliKeyOk,
mcp: mainMcpKeyOk mcp: mainMcpKeyOk,
} },
}, },
research: { research: {
provider: researchProvider, provider: researchProvider,
@@ -231,8 +231,8 @@ async function getModelConfiguration(options = {}) {
cost: researchModelData?.cost_per_1m_tokens || null, cost: researchModelData?.cost_per_1m_tokens || null,
keyStatus: { keyStatus: {
cli: researchCliKeyOk, cli: researchCliKeyOk,
mcp: researchMcpKeyOk mcp: researchMcpKeyOk,
} },
}, },
fallback: fallbackProvider fallback: fallbackProvider
? { ? {
@@ -242,22 +242,22 @@ async function getModelConfiguration(options = {}) {
             cost: fallbackModelData?.cost_per_1m_tokens || null,
             keyStatus: {
               cli: fallbackCliKeyOk,
-              mcp: fallbackMcpKeyOk
-            }
-          }
-        : null
-      },
-      message: 'Successfully retrieved current model configuration'
-    }
+              mcp: fallbackMcpKeyOk,
+            },
+          }
+        : null,
+      },
+      message: "Successfully retrieved current model configuration",
+    },
     };
} catch (error) { } catch (error) {
report('error', `Error getting model configuration: ${error.message}`); report("error", `Error getting model configuration: ${error.message}`);
return { return {
success: false, success: false,
error: { error: {
code: 'CONFIG_ERROR', code: "CONFIG_ERROR",
message: error.message message: error.message,
} },
}; };
} }
} }
@@ -274,13 +274,13 @@ async function getAvailableModelsList(options = {}) {
const { mcpLog, projectRoot } = options; const { mcpLog, projectRoot } = options;
const report = (level, ...args) => { const report = (level, ...args) => {
if (mcpLog && typeof mcpLog[level] === 'function') { if (mcpLog && typeof mcpLog[level] === "function") {
mcpLog[level](...args); mcpLog[level](...args);
} }
}; };
if (!projectRoot) { if (!projectRoot) {
throw new Error('Project root is required but not found.'); throw new Error("Project root is required but not found.");
} }
// Use centralized config path finding instead of hardcoded path // Use centralized config path finding instead of hardcoded path
@@ -288,11 +288,11 @@ async function getAvailableModelsList(options = {}) {
const configExists = isConfigFilePresent(projectRoot); const configExists = isConfigFilePresent(projectRoot);
log( log(
'debug', "debug",
`Checking for config file using findConfigPath, found: ${configPath}` `Checking for config file using findConfigPath, found: ${configPath}`
); );
log( log(
'debug', "debug",
`Checking config file using isConfigFilePresent(), exists: ${configExists}` `Checking config file using isConfigFilePresent(), exists: ${configExists}`
); );
@@ -311,8 +311,8 @@ async function getAvailableModelsList(options = {}) {
success: true, success: true,
data: { data: {
models: [], models: [],
message: 'No available models found' message: "No available models found",
} },
}; };
} }
@@ -326,28 +326,28 @@ async function getAvailableModelsList(options = {}) {
Boolean Boolean
); );
const otherAvailableModels = allAvailableModels.map((model) => ({ const otherAvailableModels = allAvailableModels.map((model) => ({
provider: model.provider || 'N/A', provider: model.provider || "N/A",
modelId: model.id, modelId: model.id,
sweScore: model.swe_score || null, sweScore: model.swe_score || null,
cost: model.cost_per_1m_tokens || null, cost: model.cost_per_1m_tokens || null,
allowedRoles: model.allowed_roles || [] allowedRoles: model.allowed_roles || [],
})); }));
return { return {
success: true, success: true,
data: { data: {
models: otherAvailableModels, models: otherAvailableModels,
message: `Successfully retrieved ${otherAvailableModels.length} available models` message: `Successfully retrieved ${otherAvailableModels.length} available models`,
} },
}; };
} catch (error) { } catch (error) {
report('error', `Error getting available models: ${error.message}`); report("error", `Error getting available models: ${error.message}`);
return { return {
success: false, success: false,
error: { error: {
code: 'MODELS_LIST_ERROR', code: "MODELS_LIST_ERROR",
message: error.message message: error.message,
} },
}; };
} }
} }
@@ -367,13 +367,13 @@ async function setModel(role, modelId, options = {}) {
const { mcpLog, projectRoot, providerHint } = options; const { mcpLog, projectRoot, providerHint } = options;
const report = (level, ...args) => { const report = (level, ...args) => {
if (mcpLog && typeof mcpLog[level] === 'function') { if (mcpLog && typeof mcpLog[level] === "function") {
mcpLog[level](...args); mcpLog[level](...args);
} }
}; };
if (!projectRoot) { if (!projectRoot) {
throw new Error('Project root is required but not found.'); throw new Error("Project root is required but not found.");
} }
// Use centralized config path finding instead of hardcoded path // Use centralized config path finding instead of hardcoded path
@@ -381,11 +381,11 @@ async function setModel(role, modelId, options = {}) {
const configExists = isConfigFilePresent(projectRoot); const configExists = isConfigFilePresent(projectRoot);
log( log(
'debug', "debug",
`Checking for config file using findConfigPath, found: ${configPath}` `Checking for config file using findConfigPath, found: ${configPath}`
); );
log( log(
'debug', "debug",
`Checking config file using isConfigFilePresent(), exists: ${configExists}` `Checking config file using isConfigFilePresent(), exists: ${configExists}`
); );
@@ -396,24 +396,24 @@ async function setModel(role, modelId, options = {}) {
} }
// Validate role // Validate role
if (!['main', 'research', 'fallback'].includes(role)) { if (!["main", "research", "fallback"].includes(role)) {
return { return {
success: false, success: false,
error: { error: {
code: 'INVALID_ROLE', code: "INVALID_ROLE",
message: `Invalid role: ${role}. Must be one of: main, research, fallback.` message: `Invalid role: ${role}. Must be one of: main, research, fallback.`,
} },
}; };
} }
// Validate model ID // Validate model ID
if (typeof modelId !== 'string' || modelId.trim() === '') { if (typeof modelId !== "string" || modelId.trim() === "") {
return { return {
success: false, success: false,
error: { error: {
code: 'INVALID_MODEL_ID', code: "INVALID_MODEL_ID",
message: `Invalid model ID: ${modelId}. Must be a non-empty string.` message: `Invalid model ID: ${modelId}. Must be a non-empty string.`,
} },
}; };
} }
@@ -434,33 +434,40 @@ async function setModel(role, modelId, options = {}) {
           // Found internally AND provider matches the hint
           determinedProvider = providerHint;
           report(
-            'info',
+            "info",
             `Model ${modelId} found internally with matching provider hint ${determinedProvider}.`
           );
         } else {
           // Either not found internally, OR found but under a DIFFERENT provider than hinted.
           // Proceed with custom logic based ONLY on the hint.
-          if (providerHint === 'openrouter') {
+          if (providerHint === "openrouter") {
             // Check OpenRouter ONLY because hint was openrouter
-            report('info', `Checking OpenRouter for ${modelId} (as hinted)...`);
+            report("info", `Checking OpenRouter for ${modelId} (as hinted)...`);
             const openRouterModels = await fetchOpenRouterModels();
             if (
               openRouterModels &&
               openRouterModels.some((m) => m.id === modelId)
             ) {
-              determinedProvider = 'openrouter';
-              warningMessage = `Warning: Custom OpenRouter model '${modelId}' set. This model is not officially validated by Taskmaster and may not function as expected.`;
-              report('warn', warningMessage);
+              determinedProvider = "openrouter";
+              // Check if this is a free model (ends with :free)
+              if (modelId.endsWith(":free")) {
+                warningMessage = `Warning: OpenRouter free model '${modelId}' selected. Free models have significant limitations including lower context windows, reduced rate limits, and may not support advanced features like tool_use. Consider using the paid version '${modelId.replace(":free", "")}' for full functionality.`;
+              } else {
+                warningMessage = `Warning: Custom OpenRouter model '${modelId}' set. This model is not officially validated by Taskmaster and may not function as expected.`;
+              }
+              report("warn", warningMessage);
             } else {
               // Hinted as OpenRouter but not found in live check
               throw new Error(
                 `Model ID "${modelId}" not found in the live OpenRouter model list. Please verify the ID and ensure it's available on OpenRouter.`
               );
             }
-          } else if (providerHint === 'ollama') {
+          } else if (providerHint === "ollama") {
             // Check Ollama ONLY because hint was ollama
-            report('info', `Checking Ollama for ${modelId} (as hinted)...`);
+            report("info", `Checking Ollama for ${modelId} (as hinted)...`);
             // Get the Ollama base URL from config
             const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
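For reference, a hypothetical call into this module that would surface the new warning (the project path and model ID are illustrative; setModel and its options come from this file):

// Illustrative only: set a free OpenRouter model for the main role and read back the warning
const result = await setModel("main", "qwen/qwen3-235b-a22b:free", {
  projectRoot: "/path/to/project",
  providerHint: "openrouter"
});
if (result.success && result.data.warning) {
  console.log(result.data.warning); // logs the free-model warning added above
}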
@@ -472,9 +479,9 @@ async function setModel(role, modelId, options = {}) {
`Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.` `Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
); );
} else if (ollamaModels.some((m) => m.model === modelId)) { } else if (ollamaModels.some((m) => m.model === modelId)) {
determinedProvider = 'ollama'; determinedProvider = "ollama";
warningMessage = `Warning: Custom Ollama model '${modelId}' set. Ensure your Ollama server is running and has pulled this model. Taskmaster cannot guarantee compatibility.`; warningMessage = `Warning: Custom Ollama model '${modelId}' set. Ensure your Ollama server is running and has pulled this model. Taskmaster cannot guarantee compatibility.`;
report('warn', warningMessage); report("warn", warningMessage);
} else { } else {
// Server is running but model not found // Server is running but model not found
const tagsUrl = `${ollamaBaseURL}/tags`; const tagsUrl = `${ollamaBaseURL}/tags`;
@@ -482,11 +489,11 @@ async function setModel(role, modelId, options = {}) {
`Model ID "${modelId}" not found in the Ollama instance. Please verify the model is pulled and available. You can check available models with: curl ${tagsUrl}` `Model ID "${modelId}" not found in the Ollama instance. Please verify the model is pulled and available. You can check available models with: curl ${tagsUrl}`
); );
} }
} else if (providerHint === 'bedrock') { } else if (providerHint === "bedrock") {
// Set provider without model validation since Bedrock models are managed by AWS // Set provider without model validation since Bedrock models are managed by AWS
determinedProvider = 'bedrock'; determinedProvider = "bedrock";
warningMessage = `Warning: Custom Bedrock model '${modelId}' set. Please ensure the model ID is valid and accessible in your AWS account.`; warningMessage = `Warning: Custom Bedrock model '${modelId}' set. Please ensure the model ID is valid and accessible in your AWS account.`;
report('warn', warningMessage); report("warn", warningMessage);
} else { } else {
// Invalid provider hint - should not happen // Invalid provider hint - should not happen
throw new Error(`Invalid provider hint received: ${providerHint}`); throw new Error(`Invalid provider hint received: ${providerHint}`);
@@ -498,7 +505,7 @@ async function setModel(role, modelId, options = {}) {
// Found internally, use the provider from the internal list // Found internally, use the provider from the internal list
determinedProvider = modelData.provider; determinedProvider = modelData.provider;
report( report(
'info', "info",
`Model ${modelId} found internally with provider ${determinedProvider}.` `Model ${modelId} found internally with provider ${determinedProvider}.`
); );
} else { } else {
@@ -506,9 +513,9 @@ async function setModel(role, modelId, options = {}) {
return { return {
success: false, success: false,
error: { error: {
code: 'MODEL_NOT_FOUND_NO_HINT', code: "MODEL_NOT_FOUND_NO_HINT",
message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter or --ollama.` message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter or --ollama.`,
} },
}; };
} }
} }
@@ -521,9 +528,9 @@ async function setModel(role, modelId, options = {}) {
return { return {
success: false, success: false,
error: { error: {
code: 'PROVIDER_UNDETERMINED', code: "PROVIDER_UNDETERMINED",
message: `Could not determine the provider for model ID "${modelId}".` message: `Could not determine the provider for model ID "${modelId}".`,
} },
}; };
} }
@@ -531,7 +538,7 @@ async function setModel(role, modelId, options = {}) {
currentConfig.models[role] = { currentConfig.models[role] = {
...currentConfig.models[role], // Keep existing params like maxTokens ...currentConfig.models[role], // Keep existing params like maxTokens
provider: determinedProvider, provider: determinedProvider,
modelId: modelId modelId: modelId,
}; };
// Write updated configuration // Write updated configuration
@@ -540,14 +547,14 @@ async function setModel(role, modelId, options = {}) {
return { return {
success: false, success: false,
error: { error: {
code: 'CONFIG_WRITE_ERROR', code: "CONFIG_WRITE_ERROR",
message: 'Error writing updated configuration to configuration file' message: "Error writing updated configuration to configuration file",
} },
}; };
} }
const successMessage = `Successfully set ${role} model to ${modelId} (Provider: ${determinedProvider})`; const successMessage = `Successfully set ${role} model to ${modelId} (Provider: ${determinedProvider})`;
report('info', successMessage); report("info", successMessage);
return { return {
success: true, success: true,
@@ -556,17 +563,17 @@ async function setModel(role, modelId, options = {}) {
provider: determinedProvider, provider: determinedProvider,
modelId, modelId,
message: successMessage, message: successMessage,
warning: warningMessage // Include warning in the response data warning: warningMessage, // Include warning in the response data
} },
}; };
} catch (error) { } catch (error) {
report('error', `Error setting ${role} model: ${error.message}`); report("error", `Error setting ${role} model: ${error.message}`);
return { return {
success: false, success: false,
error: { error: {
code: 'SET_MODEL_ERROR', code: "SET_MODEL_ERROR",
message: error.message message: error.message,
} },
}; };
} }
} }
@@ -582,7 +589,7 @@ async function setModel(role, modelId, options = {}) {
async function getApiKeyStatusReport(options = {}) { async function getApiKeyStatusReport(options = {}) {
const { mcpLog, projectRoot, session } = options; const { mcpLog, projectRoot, session } = options;
const report = (level, ...args) => { const report = (level, ...args) => {
if (mcpLog && typeof mcpLog[level] === 'function') { if (mcpLog && typeof mcpLog[level] === "function") {
mcpLog[level](...args); mcpLog[level](...args);
} }
}; };
@@ -590,7 +597,7 @@ async function getApiKeyStatusReport(options = {}) {
try { try {
const providers = getAllProviders(); const providers = getAllProviders();
const providersToCheck = providers.filter( const providersToCheck = providers.filter(
(p) => p.toLowerCase() !== 'ollama' (p) => p.toLowerCase() !== "ollama"
); // Ollama is not a provider, it's a service, doesn't need an api key usually ); // Ollama is not a provider, it's a service, doesn't need an api key usually
const statusReport = providersToCheck.map((provider) => { const statusReport = providersToCheck.map((provider) => {
// Use provided projectRoot for MCP status check // Use provided projectRoot for MCP status check
@@ -599,26 +606,26 @@ async function getApiKeyStatusReport(options = {}) {
return { return {
provider, provider,
cli: cliOk, cli: cliOk,
mcp: mcpOk mcp: mcpOk,
}; };
}); });
report('info', 'Successfully generated API key status report.'); report("info", "Successfully generated API key status report.");
return { return {
success: true, success: true,
data: { data: {
report: statusReport, report: statusReport,
message: 'API key status report generated.' message: "API key status report generated.",
} },
}; };
} catch (error) { } catch (error) {
report('error', `Error generating API key status report: ${error.message}`); report("error", `Error generating API key status report: ${error.message}`);
return { return {
success: false, success: false,
error: { error: {
code: 'API_KEY_STATUS_ERROR', code: "API_KEY_STATUS_ERROR",
message: error.message message: error.message,
} },
}; };
} }
} }
@@ -627,5 +634,5 @@ export {
getModelConfiguration, getModelConfiguration,
getAvailableModelsList, getAvailableModelsList,
setModel, setModel,
getApiKeyStatusReport getApiKeyStatusReport,
}; };