chore: add a warning when the custom OpenRouter model is a free model, since free models suffer from lower rate limits, restricted context, and, worst of all, no access to tool_use.

This commit is contained in:
Eyal Toledano
2025-06-07 18:54:11 -04:00
parent 65b70d746a
commit 54005d5486
5 changed files with 2027 additions and 1403 deletions

View File

@@ -1,8 +1,8 @@
{
"models": {
"main": {
"provider": "anthropic",
"modelId": "claude-sonnet-4-20250514",
"provider": "openrouter",
"modelId": "qwen/qwen3-235b-a22b:free",
"maxTokens": 50000,
"temperature": 0.2
},

View File

@@ -0,0 +1,35 @@
# Task ID: 97
# Title: Create Taskmaster Jingle Implementation
# Status: pending
# Dependencies: 95, 57, 3, 2
# Priority: medium
# Description: Develop a musical jingle system for Taskmaster that plays sound effects during key CLI interactions to enhance user experience.
# Details:
This task involves implementing a sound system that plays audio cues during Taskmaster CLI operations. Key implementation steps include:
1. Audio System Integration:
- Research and select appropriate audio library compatible with Node.js CLI applications
- Implement cross-platform audio playback (Windows, macOS, Linux)
- Create sound configuration options in .taskmasterconfig
2. Jingle Design:
- Define sound triggers for key events (task creation, completion, errors, etc.)
- Create or source appropriate sound files (WAV/MP3 format)
- Implement volume control and mute option in settings
3. CLI Integration:
- Add sound playback to core CLI commands (init, create, update, delete)
- Implement optional sound effects toggle via command line flags
- Ensure audio playback doesn't interfere with CLI performance
4. Documentation:
- Update user guide with sound configuration instructions
- Add a troubleshooting section for audio playback issues
# Test Strategy:
1. Verify audio plays correctly during each supported CLI operation
2. Test sound configuration options across different platforms
3. Confirm volume control and mute functionality works as expected
4. Validate that audio playback doesn't affect CLI performance
5. Test edge cases (no audio hardware, invalid sound files, etc.)
6. Ensure sound effects can be disabled via configuration and CLI flags

File diff suppressed because one or more lines are too long

View File

@@ -388,13 +388,6 @@
"allowed_roles": ["main", "fallback"],
"max_tokens": 24000
},
{
"id": "qwen/qwen3-235b-a22b:free",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 0.14, "output": 2 },
"allowed_roles": ["main", "fallback"],
"max_tokens": 24000
},
{
"id": "mistralai/mistral-small-3.1-24b-instruct:free",
"swe_score": 0,

View File

@@ -3,8 +3,8 @@
* Core functionality for managing AI model configurations
*/
import https from 'https';
import http from 'http';
import https from "https";
import http from "http";
import {
getMainModelId,
getResearchModelId,
@@ -19,10 +19,10 @@ import {
writeConfig,
isConfigFilePresent,
getAllProviders,
getBaseUrlForRole
} from '../config-manager.js';
import { findConfigPath } from '../../../src/utils/path-utils.js';
import { log } from '../utils.js';
getBaseUrlForRole,
} from "../config-manager.js";
import { findConfigPath } from "../../../src/utils/path-utils.js";
import { log } from "../utils.js";
/**
* Fetches the list of models from OpenRouter API.
@@ -31,26 +31,26 @@ import { log } from '../utils.js';
function fetchOpenRouterModels() {
return new Promise((resolve) => {
const options = {
hostname: 'openrouter.ai',
path: '/api/v1/models',
method: 'GET',
hostname: "openrouter.ai",
path: "/api/v1/models",
method: "GET",
headers: {
Accept: 'application/json'
}
Accept: "application/json",
},
};
const req = https.request(options, (res) => {
let data = '';
res.on('data', (chunk) => {
let data = "";
res.on("data", (chunk) => {
data += chunk;
});
res.on('end', () => {
res.on("end", () => {
if (res.statusCode === 200) {
try {
const parsedData = JSON.parse(data);
resolve(parsedData.data || []); // Return the array of models
} catch (e) {
console.error('Error parsing OpenRouter response:', e);
console.error("Error parsing OpenRouter response:", e);
resolve(null); // Indicate failure
}
} else {
@@ -62,8 +62,8 @@ function fetchOpenRouterModels() {
});
});
req.on('error', (e) => {
console.error('Error fetching OpenRouter models:', e);
req.on("error", (e) => {
console.error("Error fetching OpenRouter models:", e);
resolve(null); // Indicate failure
});
req.end();
@@ -75,14 +75,14 @@ function fetchOpenRouterModels() {
* @param {string} baseURL - The base URL for the Ollama API (e.g., "http://localhost:11434/api")
* @returns {Promise<Array|null>} A promise that resolves with the list of model objects or null if fetch fails.
*/
function fetchOllamaModels(baseURL = 'http://localhost:11434/api') {
function fetchOllamaModels(baseURL = "http://localhost:11434/api") {
return new Promise((resolve) => {
try {
// Parse the base URL to extract hostname, port, and base path
const url = new URL(baseURL);
const isHttps = url.protocol === 'https:';
const isHttps = url.protocol === "https:";
const port = url.port || (isHttps ? 443 : 80);
const basePath = url.pathname.endsWith('/')
const basePath = url.pathname.endsWith("/")
? url.pathname.slice(0, -1)
: url.pathname;
@@ -90,25 +90,25 @@ function fetchOllamaModels(baseURL = 'http://localhost:11434/api') {
hostname: url.hostname,
port: parseInt(port, 10),
path: `${basePath}/tags`,
method: 'GET',
method: "GET",
headers: {
Accept: 'application/json'
}
Accept: "application/json",
},
};
const requestLib = isHttps ? https : http;
const req = requestLib.request(options, (res) => {
let data = '';
res.on('data', (chunk) => {
let data = "";
res.on("data", (chunk) => {
data += chunk;
});
res.on('end', () => {
res.on("end", () => {
if (res.statusCode === 200) {
try {
const parsedData = JSON.parse(data);
resolve(parsedData.models || []); // Return the array of models
} catch (e) {
console.error('Error parsing Ollama response:', e);
console.error("Error parsing Ollama response:", e);
resolve(null); // Indicate failure
}
} else {
@@ -120,13 +120,13 @@ function fetchOllamaModels(baseURL = 'http://localhost:11434/api') {
});
});
req.on('error', (e) => {
console.error('Error fetching Ollama models:', e);
req.on("error", (e) => {
console.error("Error fetching Ollama models:", e);
resolve(null); // Indicate failure
});
req.end();
} catch (e) {
console.error('Error parsing Ollama base URL:', e);
console.error("Error parsing Ollama base URL:", e);
resolve(null); // Indicate failure
}
});
@@ -144,13 +144,13 @@ async function getModelConfiguration(options = {}) {
const { mcpLog, projectRoot, session } = options;
const report = (level, ...args) => {
if (mcpLog && typeof mcpLog[level] === 'function') {
if (mcpLog && typeof mcpLog[level] === "function") {
mcpLog[level](...args);
}
};
if (!projectRoot) {
throw new Error('Project root is required but not found.');
throw new Error("Project root is required but not found.");
}
// Use centralized config path finding instead of hardcoded path
@@ -158,11 +158,11 @@ async function getModelConfiguration(options = {}) {
const configExists = isConfigFilePresent(projectRoot);
log(
'debug',
"debug",
`Checking for config file using findConfigPath, found: ${configPath}`
);
log(
'debug',
"debug",
`Checking config file using isConfigFilePresent(), exists: ${configExists}`
);
@@ -221,8 +221,8 @@ async function getModelConfiguration(options = {}) {
cost: mainModelData?.cost_per_1m_tokens || null,
keyStatus: {
cli: mainCliKeyOk,
mcp: mainMcpKeyOk
}
mcp: mainMcpKeyOk,
},
},
research: {
provider: researchProvider,
@@ -231,8 +231,8 @@ async function getModelConfiguration(options = {}) {
cost: researchModelData?.cost_per_1m_tokens || null,
keyStatus: {
cli: researchCliKeyOk,
mcp: researchMcpKeyOk
}
mcp: researchMcpKeyOk,
},
},
fallback: fallbackProvider
? {
@@ -242,22 +242,22 @@ async function getModelConfiguration(options = {}) {
cost: fallbackModelData?.cost_per_1m_tokens || null,
keyStatus: {
cli: fallbackCliKeyOk,
mcp: fallbackMcpKeyOk
}
}
: null
mcp: fallbackMcpKeyOk,
},
message: 'Successfully retrieved current model configuration'
}
: null,
},
message: "Successfully retrieved current model configuration",
},
};
} catch (error) {
report('error', `Error getting model configuration: ${error.message}`);
report("error", `Error getting model configuration: ${error.message}`);
return {
success: false,
error: {
code: 'CONFIG_ERROR',
message: error.message
}
code: "CONFIG_ERROR",
message: error.message,
},
};
}
}
@@ -274,13 +274,13 @@ async function getAvailableModelsList(options = {}) {
const { mcpLog, projectRoot } = options;
const report = (level, ...args) => {
if (mcpLog && typeof mcpLog[level] === 'function') {
if (mcpLog && typeof mcpLog[level] === "function") {
mcpLog[level](...args);
}
};
if (!projectRoot) {
throw new Error('Project root is required but not found.');
throw new Error("Project root is required but not found.");
}
// Use centralized config path finding instead of hardcoded path
@@ -288,11 +288,11 @@ async function getAvailableModelsList(options = {}) {
const configExists = isConfigFilePresent(projectRoot);
log(
'debug',
"debug",
`Checking for config file using findConfigPath, found: ${configPath}`
);
log(
'debug',
"debug",
`Checking config file using isConfigFilePresent(), exists: ${configExists}`
);
@@ -311,8 +311,8 @@ async function getAvailableModelsList(options = {}) {
success: true,
data: {
models: [],
message: 'No available models found'
}
message: "No available models found",
},
};
}
@@ -326,28 +326,28 @@ async function getAvailableModelsList(options = {}) {
Boolean
);
const otherAvailableModels = allAvailableModels.map((model) => ({
provider: model.provider || 'N/A',
provider: model.provider || "N/A",
modelId: model.id,
sweScore: model.swe_score || null,
cost: model.cost_per_1m_tokens || null,
allowedRoles: model.allowed_roles || []
allowedRoles: model.allowed_roles || [],
}));
return {
success: true,
data: {
models: otherAvailableModels,
message: `Successfully retrieved ${otherAvailableModels.length} available models`
}
message: `Successfully retrieved ${otherAvailableModels.length} available models`,
},
};
} catch (error) {
report('error', `Error getting available models: ${error.message}`);
report("error", `Error getting available models: ${error.message}`);
return {
success: false,
error: {
code: 'MODELS_LIST_ERROR',
message: error.message
}
code: "MODELS_LIST_ERROR",
message: error.message,
},
};
}
}
@@ -367,13 +367,13 @@ async function setModel(role, modelId, options = {}) {
const { mcpLog, projectRoot, providerHint } = options;
const report = (level, ...args) => {
if (mcpLog && typeof mcpLog[level] === 'function') {
if (mcpLog && typeof mcpLog[level] === "function") {
mcpLog[level](...args);
}
};
if (!projectRoot) {
throw new Error('Project root is required but not found.');
throw new Error("Project root is required but not found.");
}
// Use centralized config path finding instead of hardcoded path
@@ -381,11 +381,11 @@ async function setModel(role, modelId, options = {}) {
const configExists = isConfigFilePresent(projectRoot);
log(
'debug',
"debug",
`Checking for config file using findConfigPath, found: ${configPath}`
);
log(
'debug',
"debug",
`Checking config file using isConfigFilePresent(), exists: ${configExists}`
);
@@ -396,24 +396,24 @@ async function setModel(role, modelId, options = {}) {
}
// Validate role
if (!['main', 'research', 'fallback'].includes(role)) {
if (!["main", "research", "fallback"].includes(role)) {
return {
success: false,
error: {
code: 'INVALID_ROLE',
message: `Invalid role: ${role}. Must be one of: main, research, fallback.`
}
code: "INVALID_ROLE",
message: `Invalid role: ${role}. Must be one of: main, research, fallback.`,
},
};
}
// Validate model ID
if (typeof modelId !== 'string' || modelId.trim() === '') {
if (typeof modelId !== "string" || modelId.trim() === "") {
return {
success: false,
error: {
code: 'INVALID_MODEL_ID',
message: `Invalid model ID: ${modelId}. Must be a non-empty string.`
}
code: "INVALID_MODEL_ID",
message: `Invalid model ID: ${modelId}. Must be a non-empty string.`,
},
};
}
@@ -434,33 +434,40 @@ async function setModel(role, modelId, options = {}) {
// Found internally AND provider matches the hint
determinedProvider = providerHint;
report(
'info',
"info",
`Model ${modelId} found internally with matching provider hint ${determinedProvider}.`
);
} else {
// Either not found internally, OR found but under a DIFFERENT provider than hinted.
// Proceed with custom logic based ONLY on the hint.
if (providerHint === 'openrouter') {
if (providerHint === "openrouter") {
// Check OpenRouter ONLY because hint was openrouter
report('info', `Checking OpenRouter for ${modelId} (as hinted)...`);
report("info", `Checking OpenRouter for ${modelId} (as hinted)...`);
const openRouterModels = await fetchOpenRouterModels();
if (
openRouterModels &&
openRouterModels.some((m) => m.id === modelId)
) {
determinedProvider = 'openrouter';
determinedProvider = "openrouter";
// Check if this is a free model (ends with :free)
if (modelId.endsWith(":free")) {
warningMessage = `Warning: OpenRouter free model '${modelId}' selected. Free models have significant limitations including lower context windows, reduced rate limits, and may not support advanced features like tool_use. Consider using the paid version '${modelId.replace(":free", "")}' for full functionality.`;
} else {
warningMessage = `Warning: Custom OpenRouter model '${modelId}' set. This model is not officially validated by Taskmaster and may not function as expected.`;
report('warn', warningMessage);
}
report("warn", warningMessage);
} else {
// Hinted as OpenRouter but not found in live check
throw new Error(
`Model ID "${modelId}" not found in the live OpenRouter model list. Please verify the ID and ensure it's available on OpenRouter.`
);
}
} else if (providerHint === 'ollama') {
} else if (providerHint === "ollama") {
// Check Ollama ONLY because hint was ollama
report('info', `Checking Ollama for ${modelId} (as hinted)...`);
report("info", `Checking Ollama for ${modelId} (as hinted)...`);
// Get the Ollama base URL from config
const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
@@ -472,9 +479,9 @@ async function setModel(role, modelId, options = {}) {
`Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
);
} else if (ollamaModels.some((m) => m.model === modelId)) {
determinedProvider = 'ollama';
determinedProvider = "ollama";
warningMessage = `Warning: Custom Ollama model '${modelId}' set. Ensure your Ollama server is running and has pulled this model. Taskmaster cannot guarantee compatibility.`;
report('warn', warningMessage);
report("warn", warningMessage);
} else {
// Server is running but model not found
const tagsUrl = `${ollamaBaseURL}/tags`;
@@ -482,11 +489,11 @@ async function setModel(role, modelId, options = {}) {
`Model ID "${modelId}" not found in the Ollama instance. Please verify the model is pulled and available. You can check available models with: curl ${tagsUrl}`
);
}
} else if (providerHint === 'bedrock') {
} else if (providerHint === "bedrock") {
// Set provider without model validation since Bedrock models are managed by AWS
determinedProvider = 'bedrock';
determinedProvider = "bedrock";
warningMessage = `Warning: Custom Bedrock model '${modelId}' set. Please ensure the model ID is valid and accessible in your AWS account.`;
report('warn', warningMessage);
report("warn", warningMessage);
} else {
// Invalid provider hint - should not happen
throw new Error(`Invalid provider hint received: ${providerHint}`);
@@ -498,7 +505,7 @@ async function setModel(role, modelId, options = {}) {
// Found internally, use the provider from the internal list
determinedProvider = modelData.provider;
report(
'info',
"info",
`Model ${modelId} found internally with provider ${determinedProvider}.`
);
} else {
@@ -506,9 +513,9 @@ async function setModel(role, modelId, options = {}) {
return {
success: false,
error: {
code: 'MODEL_NOT_FOUND_NO_HINT',
message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter or --ollama.`
}
code: "MODEL_NOT_FOUND_NO_HINT",
message: `Model ID "${modelId}" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter or --ollama.`,
},
};
}
}
@@ -521,9 +528,9 @@ async function setModel(role, modelId, options = {}) {
return {
success: false,
error: {
code: 'PROVIDER_UNDETERMINED',
message: `Could not determine the provider for model ID "${modelId}".`
}
code: "PROVIDER_UNDETERMINED",
message: `Could not determine the provider for model ID "${modelId}".`,
},
};
}
@@ -531,7 +538,7 @@ async function setModel(role, modelId, options = {}) {
currentConfig.models[role] = {
...currentConfig.models[role], // Keep existing params like maxTokens
provider: determinedProvider,
modelId: modelId
modelId: modelId,
};
// Write updated configuration
@@ -540,14 +547,14 @@ async function setModel(role, modelId, options = {}) {
return {
success: false,
error: {
code: 'CONFIG_WRITE_ERROR',
message: 'Error writing updated configuration to configuration file'
}
code: "CONFIG_WRITE_ERROR",
message: "Error writing updated configuration to configuration file",
},
};
}
const successMessage = `Successfully set ${role} model to ${modelId} (Provider: ${determinedProvider})`;
report('info', successMessage);
report("info", successMessage);
return {
success: true,
@@ -556,17 +563,17 @@ async function setModel(role, modelId, options = {}) {
provider: determinedProvider,
modelId,
message: successMessage,
warning: warningMessage // Include warning in the response data
}
warning: warningMessage, // Include warning in the response data
},
};
} catch (error) {
report('error', `Error setting ${role} model: ${error.message}`);
report("error", `Error setting ${role} model: ${error.message}`);
return {
success: false,
error: {
code: 'SET_MODEL_ERROR',
message: error.message
}
code: "SET_MODEL_ERROR",
message: error.message,
},
};
}
}
@@ -582,7 +589,7 @@ async function setModel(role, modelId, options = {}) {
async function getApiKeyStatusReport(options = {}) {
const { mcpLog, projectRoot, session } = options;
const report = (level, ...args) => {
if (mcpLog && typeof mcpLog[level] === 'function') {
if (mcpLog && typeof mcpLog[level] === "function") {
mcpLog[level](...args);
}
};
@@ -590,7 +597,7 @@ async function getApiKeyStatusReport(options = {}) {
try {
const providers = getAllProviders();
const providersToCheck = providers.filter(
(p) => p.toLowerCase() !== 'ollama'
(p) => p.toLowerCase() !== "ollama"
); // Ollama is a locally hosted service rather than a hosted provider, so it usually doesn't need an API key
const statusReport = providersToCheck.map((provider) => {
// Use provided projectRoot for MCP status check
@@ -599,26 +606,26 @@ async function getApiKeyStatusReport(options = {}) {
return {
provider,
cli: cliOk,
mcp: mcpOk
mcp: mcpOk,
};
});
report('info', 'Successfully generated API key status report.');
report("info", "Successfully generated API key status report.");
return {
success: true,
data: {
report: statusReport,
message: 'API key status report generated.'
}
message: "API key status report generated.",
},
};
} catch (error) {
report('error', `Error generating API key status report: ${error.message}`);
report("error", `Error generating API key status report: ${error.message}`);
return {
success: false,
error: {
code: 'API_KEY_STATUS_ERROR',
message: error.message
}
code: "API_KEY_STATUS_ERROR",
message: error.message,
},
};
}
}
@@ -627,5 +634,5 @@ export {
getModelConfiguration,
getAvailableModelsList,
setModel,
getApiKeyStatusReport
getApiKeyStatusReport,
};