feat(ai): Integrate OpenAI provider and enhance model config

- Add OpenAI provider implementation using @ai-sdk/openai.
- Update `models` command/tool to display API key status for configured providers.
- Implement model-specific `maxTokens` override logic in `config-manager.js` using `supported-models.json`.
- Improve AI error message parsing in `ai-services-unified.js` for better clarity.
This commit is contained in:
Eyal Toledano
2025-04-27 03:56:23 -04:00
parent 842eaf7224
commit 2517bc112c
21 changed files with 1350 additions and 662 deletions

View File

@@ -0,0 +1,7 @@
---
'task-master-ai': minor
---
Feat: Integrate OpenAI as a new AI provider.
Feat: Enhance `models` command/tool to display API key status.
Feat: Implement model-specific `maxTokens` override based on `supported-models.json`, so an incorrectly high max token value in `.taskmasterconfig` is capped at the model's actual limit.
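For reference, the override keys off a new `max_tokens` field on each model entry in `supported-models.json`; the effective limit is the smaller of the role's configured `maxTokens` and this value. The `o3-mini` entry added in this commit, for example:

```json
{
	"id": "o3-mini",
	"swe_score": 0.493,
	"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
	"allowed_roles": ["main", "fallback"],
	"max_tokens": 100000
}
```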

View File

@@ -48,7 +48,7 @@ This rule guides AI assistants on how to view, configure, and interact with the
- **`mistral`**: Requires `MISTRAL_API_KEY`.
- **`azure`**: Requires `AZURE_OPENAI_API_KEY` and `AZURE_OPENAI_ENDPOINT`.
- **`openrouter`**: Requires `OPENROUTER_API_KEY`.
- **`ollama`**: Typically requires `OLLAMA_API_KEY` *and* `OLLAMA_BASE_URL` (default: `http://localhost:11434/api`). *Check specific setup.*
- **`ollama`**: Might require `OLLAMA_API_KEY` (not currently supported) *and* `OLLAMA_BASE_URL` (default: `http://localhost:11434/api`). *Check specific setup.*
- **Troubleshooting:**
- If AI commands fail (especially in MCP context):

View File

@@ -7,4 +7,3 @@ MISTRAL_API_KEY=YOUR_MISTRAL_KEY_HERE
OPENROUTER_API_KEY=YOUR_OPENROUTER_KEY_HERE
XAI_API_KEY=YOUR_XAI_KEY_HERE
AZURE_OPENAI_API_KEY=YOUR_AZURE_KEY_HERE
OLLAMA_API_KEY=YOUR_OLLAMA_KEY_HERE

View File

@@ -1,9 +1,9 @@
{
"models": {
"main": {
"provider": "google",
"modelId": "gemini-2.5-pro-exp-03-25",
"maxTokens": 120000,
"provider": "openai",
"modelId": "o3-mini",
"maxTokens": 100000,
"temperature": 0.2
},
"research": {
@@ -14,7 +14,7 @@
},
"fallback": {
"provider": "anthropic",
"modelId": "claude-3-5-sonnet-20241022",
"modelId": "claude-3-7-sonnet-20250219",
"maxTokens": 120000,
"temperature": 0.2
}

View File

@@ -42,8 +42,7 @@ npm i -g task-master-ai
"MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
"OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
"XAI_API_KEY": "YOUR_XAI_KEY_HERE",
"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE",
"OLLAMA_API_KEY": "YOUR_OLLAMA_KEY_HERE"
"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE"
}
}
}

View File

@@ -5,5 +5,4 @@ OPENAI_API_KEY=your_openai_api_key_here # Optional, for OpenAI/OpenR
GOOGLE_API_KEY=your_google_api_key_here # Optional, for Google Gemini models.
MISTRAL_API_KEY=your_mistral_key_here # Optional, for Mistral AI models.
XAI_API_KEY=YOUR_XAI_KEY_HERE # Optional, for xAI AI models.
AZURE_OPENAI_API_KEY=your_azure_key_here # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).
OLLAMA_API_KEY=YOUR_OLLAMA_KEY_HERE # Optional, for local Ollama AI models (requires endpoint in .taskmasterconfig).
AZURE_OPENAI_API_KEY=your_azure_key_here # Optional, for Azure OpenAI models (requires endpoint in .taskmasterconfig).

View File

@@ -32,8 +32,7 @@ npm i -g task-master-ai
"MISTRAL_API_KEY": "YOUR_MISTRAL_KEY_HERE",
"OPENROUTER_API_KEY": "YOUR_OPENROUTER_KEY_HERE",
"XAI_API_KEY": "YOUR_XAI_KEY_HERE",
"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE",
"OLLAMA_API_KEY": "YOUR_OLLAMA_KEY_HERE"
"AZURE_OPENAI_API_KEY": "YOUR_AZURE_KEY_HERE"
}
}
}

package-lock.json (generated, 24 lines changed)
View File

@@ -13,7 +13,7 @@
"@ai-sdk/azure": "^1.3.17",
"@ai-sdk/google": "^1.2.13",
"@ai-sdk/mistral": "^1.2.7",
"@ai-sdk/openai": "^1.3.16",
"@ai-sdk/openai": "^1.3.20",
"@ai-sdk/perplexity": "^1.1.7",
"@ai-sdk/xai": "^1.2.13",
"@anthropic-ai/sdk": "^0.39.0",
@@ -90,6 +90,22 @@
"zod": "^3.0.0"
}
},
"node_modules/@ai-sdk/azure/node_modules/@ai-sdk/openai": {
"version": "1.3.16",
"resolved": "https://registry.npmjs.org/@ai-sdk/openai/-/openai-1.3.16.tgz",
"integrity": "sha512-pjtiBKt1GgaSKZryTbM3tqgoegJwgAUlp1+X5uN6T+VPnI4FLSymV65tyloWzDlyqZmi9HXnnSRPu76VoL5D5g==",
"license": "Apache-2.0",
"dependencies": {
"@ai-sdk/provider": "1.1.3",
"@ai-sdk/provider-utils": "2.2.7"
},
"engines": {
"node": ">=18"
},
"peerDependencies": {
"zod": "^3.0.0"
}
},
"node_modules/@ai-sdk/google": {
"version": "1.2.13",
"resolved": "https://registry.npmjs.org/@ai-sdk/google/-/google-1.2.13.tgz",
@@ -123,9 +139,9 @@
}
},
"node_modules/@ai-sdk/openai": {
"version": "1.3.16",
"resolved": "https://registry.npmjs.org/@ai-sdk/openai/-/openai-1.3.16.tgz",
"integrity": "sha512-pjtiBKt1GgaSKZryTbM3tqgoegJwgAUlp1+X5uN6T+VPnI4FLSymV65tyloWzDlyqZmi9HXnnSRPu76VoL5D5g==",
"version": "1.3.20",
"resolved": "https://registry.npmjs.org/@ai-sdk/openai/-/openai-1.3.20.tgz",
"integrity": "sha512-/DflUy7ROG9k6n6YTXMBFPbujBKnbGY58f3CwvicLvDar9nDAloVnUWd3LUoOxpSVnX8vtQ7ngxF52SLWO6RwQ==",
"license": "Apache-2.0",
"dependencies": {
"@ai-sdk/provider": "1.1.3",

View File

@@ -42,7 +42,7 @@
"@ai-sdk/azure": "^1.3.17",
"@ai-sdk/google": "^1.2.13",
"@ai-sdk/mistral": "^1.2.7",
"@ai-sdk/openai": "^1.3.16",
"@ai-sdk/openai": "^1.3.20",
"@ai-sdk/perplexity": "^1.1.7",
"@ai-sdk/xai": "^1.2.13",
"@anthropic-ai/sdk": "^0.39.0",

View File

@@ -25,7 +25,8 @@ import { log, resolveEnvVariable } from './utils.js';
import * as anthropic from '../../src/ai-providers/anthropic.js';
import * as perplexity from '../../src/ai-providers/perplexity.js';
import * as google from '../../src/ai-providers/google.js'; // Import Google provider
// TODO: Import other provider modules when implemented (openai, ollama, etc.)
import * as openai from '../../src/ai-providers/openai.js'; // ADD: Import OpenAI provider
// TODO: Import other provider modules when implemented (ollama, etc.)
// --- Provider Function Map ---
// Maps provider names (lowercase) to their respective service functions
@@ -47,8 +48,14 @@ const PROVIDER_FUNCTIONS = {
generateText: google.generateGoogleText,
streamText: google.streamGoogleText,
generateObject: google.generateGoogleObject
},
openai: {
// ADD: OpenAI entry
generateText: openai.generateOpenAIText,
streamText: openai.streamOpenAIText,
generateObject: openai.generateOpenAIObject
}
// TODO: Add entries for openai, ollama, etc. when implemented
// TODO: Add entries for ollama, etc. when implemented
};
// --- Configuration for Retries ---
@@ -71,6 +78,54 @@ function isRetryableError(error) {
);
}
/**
* Extracts a user-friendly error message from a potentially complex AI error object.
* Prioritizes nested messages and falls back to the top-level message.
* @param {Error | object | any} error - The error object.
* @returns {string} A concise error message.
*/
function _extractErrorMessage(error) {
try {
// Attempt 1: Look for Vercel SDK specific nested structure (common)
if (error?.data?.error?.message) {
return error.data.error.message;
}
// Attempt 2: Look for nested error message directly in the error object
if (error?.error?.message) {
return error.error.message;
}
// Attempt 3: Look for nested error message in response body if it's JSON string
if (typeof error?.responseBody === 'string') {
try {
const body = JSON.parse(error.responseBody);
if (body?.error?.message) {
return body.error.message;
}
} catch (parseError) {
// Ignore if responseBody is not valid JSON
}
}
// Attempt 4: Use the top-level message if it exists
if (typeof error?.message === 'string' && error.message) {
return error.message;
}
// Attempt 5: Handle simple string errors
if (typeof error === 'string') {
return error;
}
// Fallback
return 'An unknown AI service error occurred.';
} catch (e) {
// Safety net
return 'Failed to extract error message.';
}
}
/**
* Internal helper to resolve the API key for a given provider.
* @param {string} providerName - The name of the provider (lowercase).
@@ -87,8 +142,7 @@ function _resolveApiKey(providerName, session) {
mistral: 'MISTRAL_API_KEY',
azure: 'AZURE_OPENAI_API_KEY',
openrouter: 'OPENROUTER_API_KEY',
xai: 'XAI_API_KEY',
ollama: 'OLLAMA_API_KEY'
xai: 'XAI_API_KEY'
};
// Double check this -- I have had to use an api key for ollama in the past
@@ -211,6 +265,8 @@ async function _unifiedServiceRunner(serviceType, params) {
}
let lastError = null;
let lastCleanErrorMessage =
'AI service call failed for all configured roles.';
for (const currentRole of sequence) {
let providerName, modelId, apiKey, roleParams, providerFnSet, providerApiFn;
@@ -344,23 +400,21 @@ async function _unifiedServiceRunner(serviceType, params) {
return result; // Return original result for other cases
} catch (error) {
const cleanMessage = _extractErrorMessage(error); // Extract clean message
log(
'error', // Log as error since this role attempt failed
`Service call failed for role ${currentRole} (Provider: ${providerName || 'unknown'}): ${error.message}`
`Service call failed for role ${currentRole} (Provider: ${providerName || 'unknown'}): ${cleanMessage}` // Log the clean message
);
lastError = error; // Store the error to throw if all roles fail
// Log reason and continue (handled within the loop now)
lastError = error; // Store the original error for potential debugging
lastCleanErrorMessage = cleanMessage; // Store the clean message for final throw
// Continue to the next role in the sequence
}
}
// If loop completes, all roles failed
log('error', `All roles in the sequence [${sequence.join(', ')}] failed.`);
throw (
lastError ||
new Error(
`AI service call (${serviceType}) failed for all configured roles in the sequence.`
)
);
// Throw a new error with the cleaner message from the last failure
throw new Error(lastCleanErrorMessage);
}
/**

File diff suppressed because it is too large

View File

@@ -255,8 +255,6 @@ function getModelConfigForRole(role, explicitRoot = null) {
const config = getConfig(explicitRoot);
const roleConfig = config?.models?.[role];
if (!roleConfig) {
// This shouldn't happen if _loadAndValidateConfig ensures defaults
// But as a safety net, log and return defaults
log(
'warn',
`No model configuration found for role: ${role}. Returning default.`
@@ -363,16 +361,64 @@ function getOllamaBaseUrl(explicitRoot = null) {
}
/**
* Gets model parameters (maxTokens, temperature) for a specific role.
* Gets model parameters (maxTokens, temperature) for a specific role,
* considering model-specific overrides from supported-models.json.
* @param {string} role - The role ('main', 'research', 'fallback').
* @param {string|null} explicitRoot - Optional explicit path to the project root.
* @returns {{maxTokens: number, temperature: number}}
*/
function getParametersForRole(role, explicitRoot = null) {
const roleConfig = getModelConfigForRole(role, explicitRoot);
const roleMaxTokens = roleConfig.maxTokens;
const roleTemperature = roleConfig.temperature;
const modelId = roleConfig.modelId;
const providerName = roleConfig.provider;
let effectiveMaxTokens = roleMaxTokens; // Start with the role's default
try {
// Find the model definition in MODEL_MAP
const providerModels = MODEL_MAP[providerName];
if (providerModels && Array.isArray(providerModels)) {
const modelDefinition = providerModels.find((m) => m.id === modelId);
// Check if a model-specific max_tokens is defined and valid
if (
modelDefinition &&
typeof modelDefinition.max_tokens === 'number' &&
modelDefinition.max_tokens > 0
) {
const modelSpecificMaxTokens = modelDefinition.max_tokens;
// Use the minimum of the role default and the model specific limit
effectiveMaxTokens = Math.min(roleMaxTokens, modelSpecificMaxTokens);
log(
'debug',
`Applying model-specific max_tokens (${modelSpecificMaxTokens}) for ${modelId}. Effective limit: ${effectiveMaxTokens}`
);
} else {
log(
'debug',
`No valid model-specific max_tokens override found for ${modelId}. Using role default: ${roleMaxTokens}`
);
}
} else {
log(
'debug',
`No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}`
);
}
} catch (lookupError) {
log(
'warn',
`Error looking up model-specific max_tokens for ${modelId}: ${lookupError.message}. Using role default: ${roleMaxTokens}`
);
// Fallback to role default on error
effectiveMaxTokens = roleMaxTokens;
}
return {
maxTokens: roleConfig.maxTokens,
temperature: roleConfig.temperature
maxTokens: effectiveMaxTokens,
temperature: roleTemperature
};
}
@@ -385,16 +431,19 @@ function getParametersForRole(role, explicitRoot = null) {
*/
function isApiKeySet(providerName, session = null) {
// Define the expected environment variable name for each provider
if (providerName?.toLowerCase() === 'ollama') {
return true; // Indicate key status is effectively "OK"
}
const keyMap = {
openai: 'OPENAI_API_KEY',
anthropic: 'ANTHROPIC_API_KEY',
google: 'GOOGLE_API_KEY',
perplexity: 'PERPLEXITY_API_KEY',
mistral: 'MISTRAL_API_KEY',
azure: 'AZURE_OPENAI_API_KEY', // Azure needs endpoint too, but key presence is a start
azure: 'AZURE_OPENAI_API_KEY',
openrouter: 'OPENROUTER_API_KEY',
xai: 'XAI_API_KEY',
ollama: 'OLLAMA_API_KEY'
xai: 'XAI_API_KEY'
// Add other providers as needed
};
@@ -405,8 +454,15 @@ function isApiKeySet(providerName, session = null) {
}
const envVarName = keyMap[providerKey];
// Use resolveEnvVariable to check both process.env and session.env
return !!resolveEnvVariable(envVarName, session);
const apiKeyValue = resolveEnvVariable(envVarName, session);
// Check if the key exists, is not empty, and is not a placeholder
return (
apiKeyValue &&
apiKeyValue.trim() !== '' &&
!/YOUR_.*_API_KEY_HERE/.test(apiKeyValue) && // General placeholder check
!apiKeyValue.includes('KEY_HERE')
); // Another common placeholder pattern
}
/**
@@ -482,7 +538,7 @@ function getMcpApiKeyStatus(providerName, projectRoot = null) {
return false; // Unknown provider
}
return !!apiKeyToCheck && apiKeyToCheck !== placeholderValue;
return !!apiKeyToCheck && !/KEY_HERE$/.test(apiKeyToCheck);
} catch (error) {
console.error(
chalk.red(`Error reading or parsing .cursor/mcp.json: ${error.message}`)
@@ -589,6 +645,14 @@ function isConfigFilePresent(explicitRoot = null) {
return fs.existsSync(configPath);
}
/**
* Gets a list of all provider names defined in the MODEL_MAP.
* @returns {string[]} An array of provider names.
*/
function getAllProviders() {
return Object.keys(MODEL_MAP || {});
}
export {
// Core config access
getConfig,
@@ -628,5 +692,8 @@ export {
// API Key Checkers (still relevant)
isApiKeySet,
getMcpApiKeyStatus
getMcpApiKeyStatus,
// ADD: Function to get all provider names
getAllProviders
};

View File

@@ -4,25 +4,29 @@
"id": "claude-3-7-sonnet-20250219",
"swe_score": 0.623,
"cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
"allowed_roles": ["main", "fallback"]
"allowed_roles": ["main", "fallback"],
"max_tokens": 120000
},
{
"id": "claude-3-5-sonnet-20241022",
"swe_score": 0.49,
"cost_per_1m_tokens": { "input": 3.0, "output": 15.0 },
"allowed_roles": ["main", "fallback"]
"allowed_roles": ["main", "fallback"],
"max_tokens": 64000
},
{
"id": "claude-3-5-haiku-20241022",
"swe_score": 0.406,
"cost_per_1m_tokens": { "input": 0.8, "output": 4.0 },
"allowed_roles": ["main", "fallback"]
"allowed_roles": ["main", "fallback"],
"max_tokens": 64000
},
{
"id": "claude-3-opus-20240229",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 15, "output": 75 },
"allowed_roles": ["main", "fallback"]
"allowed_roles": ["main", "fallback"],
"max_tokens": 64000
}
],
"openai": [
@@ -48,7 +52,8 @@
"id": "o3-mini",
"swe_score": 0.493,
"cost_per_1m_tokens": { "input": 1.1, "output": 4.4 },
"allowed_roles": ["main", "fallback"]
"allowed_roles": ["main", "fallback"],
"max_tokens": 100000
},
{
"id": "o4-mini",
@@ -68,12 +73,6 @@
"cost_per_1m_tokens": { "input": 150.0, "output": 600.0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "gpt-4-1",
"swe_score": 0.55,
"cost_per_1m_tokens": { "input": 2.0, "output": 8.0 },
"allowed_roles": ["main", "fallback"]
},
{
"id": "gpt-4-5-preview",
"swe_score": 0.38,
@@ -148,31 +147,36 @@
"id": "sonar-pro",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 3, "output": 15 },
"allowed_roles": ["research"]
"allowed_roles": ["research"],
"max_tokens": 8700
},
{
"id": "sonar",
"swe_score": 0,
"cost_per_1m_tokens": { "input": 1, "output": 1 },
"allowed_roles": ["research"]
"allowed_roles": ["research"],
"max_tokens": 8700
},
{
"id": "deep-research",
"swe_score": 0.211,
"cost_per_1m_tokens": { "input": 2, "output": 8 },
"allowed_roles": ["research"]
"allowed_roles": ["research"],
"max_tokens": 8700
},
{
"id": "sonar-reasoning-pro",
"swe_score": 0.211,
"cost_per_1m_tokens": { "input": 2, "output": 8 },
"allowed_roles": ["main", "fallback"]
"allowed_roles": ["main", "fallback"],
"max_tokens": 8700
},
{
"id": "sonar-reasoning",
"swe_score": 0.211,
"cost_per_1m_tokens": { "input": 1, "output": 5 },
"allowed_roles": ["main", "fallback"]
"allowed_roles": ["main", "fallback"],
"max_tokens": 8700
}
],
"ollama": [

View File

@@ -17,7 +17,8 @@ import {
getMcpApiKeyStatus,
getConfig,
writeConfig,
isConfigFilePresent
isConfigFilePresent,
getAllProviders
} from '../config-manager.js';
/**
@@ -382,4 +383,61 @@ async function setModel(role, modelId, options = {}) {
}
}
export { getModelConfiguration, getAvailableModelsList, setModel };
/**
* Get API key status for all known providers.
* @param {Object} [options] - Options for the operation
* @param {Object} [options.session] - Session object containing environment variables (for MCP)
* @param {Function} [options.mcpLog] - MCP logger object (for MCP)
* @param {string} [options.projectRoot] - Project root directory
* @returns {Object} RESTful response with API key status report
*/
async function getApiKeyStatusReport(options = {}) {
const { mcpLog, projectRoot, session } = options;
const report = (level, ...args) => {
if (mcpLog && typeof mcpLog[level] === 'function') {
mcpLog[level](...args);
}
};
try {
const providers = getAllProviders();
const providersToCheck = providers.filter(
(p) => p.toLowerCase() !== 'ollama'
); // Ollama runs locally and typically does not require an API key
const statusReport = providersToCheck.map((provider) => {
// Use provided projectRoot for MCP status check
const cliOk = isApiKeySet(provider, session); // Pass session for CLI check too
const mcpOk = getMcpApiKeyStatus(provider, projectRoot);
return {
provider,
cli: cliOk,
mcp: mcpOk
};
});
report('info', 'Successfully generated API key status report.');
return {
success: true,
data: {
report: statusReport,
message: 'API key status report generated.'
}
};
} catch (error) {
report('error', `Error generating API key status report: ${error.message}`);
return {
success: false,
error: {
code: 'API_KEY_STATUS_ERROR',
message: error.message
}
};
}
}
export {
getModelConfiguration,
getAvailableModelsList,
setModel,
getApiKeyStatusReport
};

View File

@@ -1814,6 +1814,210 @@ async function confirmTaskOverwrite(tasksPath) {
return answer.toLowerCase() === 'y' || answer.toLowerCase() === 'yes';
}
/**
* Displays the API key status for different providers.
* @param {Array<{provider: string, cli: boolean, mcp: boolean}>} statusReport - The report generated by getApiKeyStatusReport.
*/
function displayApiKeyStatus(statusReport) {
if (!statusReport || statusReport.length === 0) {
console.log(chalk.yellow('No API key status information available.'));
return;
}
const table = new Table({
head: [
chalk.cyan('Provider'),
chalk.cyan('CLI Key (.env)'),
chalk.cyan('MCP Key (mcp.json)')
],
colWidths: [15, 20, 25],
chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' }
});
statusReport.forEach(({ provider, cli, mcp }) => {
const cliStatus = cli ? chalk.green('✅ Found') : chalk.red('❌ Missing');
const mcpStatus = mcp ? chalk.green('✅ Found') : chalk.red('❌ Missing');
// Capitalize provider name for display
const providerName = provider.charAt(0).toUpperCase() + provider.slice(1);
table.push([providerName, cliStatus, mcpStatus]);
});
console.log(chalk.bold('\n🔑 API Key Status:'));
console.log(table.toString());
console.log(
chalk.gray(
' Note: Some providers (e.g., Azure, Ollama) may require additional endpoint configuration in .taskmasterconfig.'
)
);
}
// --- Formatting Helpers (Potentially move some to utils.js if reusable) ---
const formatSweScoreWithTertileStars = (score, allModels) => {
// ... (Implementation from previous version or refine) ...
if (score === null || score === undefined || score <= 0) return 'N/A';
const formattedPercentage = `${(score * 100).toFixed(1)}%`;
const validScores = allModels
.map((m) => m.sweScore)
.filter((s) => s !== null && s !== undefined && s > 0);
const sortedScores = [...validScores].sort((a, b) => b - a);
const n = sortedScores.length;
let stars = chalk.gray('☆☆☆');
if (n > 0) {
const topThirdIndex = Math.max(0, Math.floor(n / 3) - 1);
const midThirdIndex = Math.max(0, Math.floor((2 * n) / 3) - 1);
if (score >= sortedScores[topThirdIndex]) stars = chalk.yellow('★★★');
else if (score >= sortedScores[midThirdIndex])
stars = chalk.yellow('★★') + chalk.gray('☆');
else stars = chalk.yellow('★') + chalk.gray('☆☆');
}
return `${formattedPercentage} ${stars}`;
};
const formatCost = (costObj) => {
// ... (Implementation from previous version or refine) ...
if (!costObj) return 'N/A';
if (costObj.input === 0 && costObj.output === 0) {
return chalk.green('Free');
}
const formatSingleCost = (costValue) => {
if (costValue === null || costValue === undefined) return 'N/A';
const isInteger = Number.isInteger(costValue);
return `$${costValue.toFixed(isInteger ? 0 : 2)}`;
};
return `${formatSingleCost(costObj.input)} in, ${formatSingleCost(costObj.output)} out`;
};
// --- Display Functions ---
/**
* Displays the currently configured active models.
* @param {ConfigData} configData - The active configuration data.
* @param {AvailableModel[]} allAvailableModels - Needed for SWE score tertiles.
*/
function displayModelConfiguration(configData, allAvailableModels = []) {
console.log(chalk.cyan.bold('\nActive Model Configuration:'));
const active = configData.activeModels;
const activeTable = new Table({
head: [
'Role',
'Provider',
'Model ID',
'SWE Score',
'Cost ($/1M tkns)'
// 'API Key Status' // Removed, handled by separate displayApiKeyStatus
].map((h) => chalk.cyan.bold(h)),
colWidths: [10, 14, 30, 18, 20 /*, 28 */], // Adjusted widths
style: { head: ['cyan', 'bold'] }
});
activeTable.push([
chalk.white('Main'),
active.main.provider,
active.main.modelId,
formatSweScoreWithTertileStars(active.main.sweScore, allAvailableModels),
formatCost(active.main.cost)
// getCombinedStatus(active.main.keyStatus) // Removed
]);
activeTable.push([
chalk.white('Research'),
active.research.provider,
active.research.modelId,
formatSweScoreWithTertileStars(
active.research.sweScore,
allAvailableModels
),
formatCost(active.research.cost)
// getCombinedStatus(active.research.keyStatus) // Removed
]);
if (active.fallback && active.fallback.provider && active.fallback.modelId) {
activeTable.push([
chalk.white('Fallback'),
active.fallback.provider,
active.fallback.modelId,
formatSweScoreWithTertileStars(
active.fallback.sweScore,
allAvailableModels
),
formatCost(active.fallback.cost)
// getCombinedStatus(active.fallback.keyStatus) // Removed
]);
} else {
activeTable.push([
chalk.white('Fallback'),
chalk.gray('-'),
chalk.gray('(Not Set)'),
chalk.gray('-'),
chalk.gray('-')
// chalk.gray('-') // Removed
]);
}
console.log(activeTable.toString());
}
/**
* Displays the list of available models not currently configured.
* @param {AvailableModel[]} availableModels - List of available models.
*/
function displayAvailableModels(availableModels) {
if (!availableModels || availableModels.length === 0) {
console.log(
chalk.gray('\n(No other models available or all are configured)')
);
return;
}
console.log(chalk.cyan.bold('\nOther Available Models:'));
const availableTable = new Table({
head: ['Provider', 'Model ID', 'SWE Score', 'Cost ($/1M tkns)'].map((h) =>
chalk.cyan.bold(h)
),
colWidths: [15, 40, 18, 25],
style: { head: ['cyan', 'bold'] }
});
availableModels.forEach((model) => {
availableTable.push([
model.provider,
model.modelId,
formatSweScoreWithTertileStars(model.sweScore, availableModels), // Pass itself for comparison
formatCost(model.cost)
]);
});
console.log(availableTable.toString());
// --- Suggested Actions Section (moved here from models command) ---
console.log(
boxen(
chalk.white.bold('Next Steps:') +
'\n' +
chalk.cyan(
`1. Set main model: ${chalk.yellow('task-master models --set-main <model_id>')}`
) +
'\n' +
chalk.cyan(
`2. Set research model: ${chalk.yellow('task-master models --set-research <model_id>')}`
) +
'\n' +
chalk.cyan(
`3. Set fallback model: ${chalk.yellow('task-master models --set-fallback <model_id>')}`
) +
'\n' +
chalk.cyan(
`4. Run interactive setup: ${chalk.yellow('task-master models --setup')}`
),
{
padding: 1,
borderColor: 'yellow',
borderStyle: 'round',
margin: { top: 1 }
}
)
);
}
// Export UI functions
export {
displayBanner,
@@ -1828,5 +2032,8 @@ export {
displayTaskById,
displayComplexityReport,
generateComplexityAnalysisPrompt,
confirmTaskOverwrite
confirmTaskOverwrite,
displayApiKeyStatus,
displayModelConfiguration,
displayAvailableModels
};

src/ai-providers/openai.js (new file, 176 lines)
View File

@@ -0,0 +1,176 @@
import { createOpenAI, openai } from '@ai-sdk/openai'; // Using openai provider from Vercel AI SDK
import { generateText, streamText, generateObject } from 'ai'; // Import necessary functions from 'ai'
import { log } from '../../scripts/modules/utils.js';
/**
* Generates text using OpenAI models via Vercel AI SDK.
*
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
* @returns {Promise<string>} The generated text content.
* @throws {Error} If API call fails.
*/
export async function generateOpenAIText(params) {
const { apiKey, modelId, messages, maxTokens, temperature } = params;
log('debug', `generateOpenAIText called with model: ${modelId}`);
if (!apiKey) {
throw new Error('OpenAI API key is required.');
}
if (!modelId) {
throw new Error('OpenAI Model ID is required.');
}
if (!messages || !Array.isArray(messages) || messages.length === 0) {
throw new Error('Invalid or empty messages array provided for OpenAI.');
}
const openaiClient = createOpenAI({ apiKey });
try {
const result = await openaiClient.chat(messages, {
// Updated: Use openaiClient.chat directly
model: modelId,
max_tokens: maxTokens,
temperature
});
// Adjust based on actual Vercel SDK response structure for openaiClient.chat
// This might need refinement based on testing the SDK's output.
const textContent = result?.choices?.[0]?.message?.content?.trim();
if (!textContent) {
log(
'warn',
'OpenAI generateText response did not contain expected content.',
{ result }
);
throw new Error('Failed to extract content from OpenAI response.');
}
log(
'debug',
`OpenAI generateText completed successfully for model: ${modelId}`
);
return textContent;
} catch (error) {
log(
'error',
`Error in generateOpenAIText (Model: ${modelId}): ${error.message}`,
{ error }
);
throw new Error(
`OpenAI API error during text generation: ${error.message}`
);
}
}
/**
* Streams text using OpenAI models via Vercel AI SDK.
*
* @param {object} params - Parameters including apiKey, modelId, messages, maxTokens, temperature.
* @returns {Promise<ReadableStream>} A readable stream of text deltas.
* @throws {Error} If API call fails.
*/
export async function streamOpenAIText(params) {
const { apiKey, modelId, messages, maxTokens, temperature } = params;
log('debug', `streamOpenAIText called with model: ${modelId}`);
if (!apiKey) {
throw new Error('OpenAI API key is required.');
}
if (!modelId) {
throw new Error('OpenAI Model ID is required.');
}
if (!messages || !Array.isArray(messages) || messages.length === 0) {
throw new Error(
'Invalid or empty messages array provided for OpenAI streaming.'
);
}
const openaiClient = createOpenAI({ apiKey });
try {
// Use the streamText function from Vercel AI SDK core
const stream = await openaiClient.chat.stream(messages, {
// Updated: Use openaiClient.chat.stream
model: modelId,
max_tokens: maxTokens,
temperature
});
log(
'debug',
`OpenAI streamText initiated successfully for model: ${modelId}`
);
// The Vercel SDK's streamText should directly return the stream object
return stream;
} catch (error) {
log(
'error',
`Error initiating OpenAI stream (Model: ${modelId}): ${error.message}`,
{ error }
);
throw new Error(
`OpenAI API error during streaming initiation: ${error.message}`
);
}
}
/**
* Generates structured objects using OpenAI models via Vercel AI SDK.
*
* @param {object} params - Parameters including apiKey, modelId, messages, schema, objectName, maxTokens, temperature.
* @returns {Promise<object>} The generated object matching the schema.
* @throws {Error} If API call fails or object generation fails.
*/
export async function generateOpenAIObject(params) {
const {
apiKey,
modelId,
messages,
schema,
objectName,
maxTokens,
temperature
} = params;
log(
'debug',
`generateOpenAIObject called with model: ${modelId}, object: ${objectName}`
);
if (!apiKey) throw new Error('OpenAI API key is required.');
if (!modelId) throw new Error('OpenAI Model ID is required.');
if (!messages || !Array.isArray(messages) || messages.length === 0)
throw new Error('Invalid messages array for OpenAI object generation.');
if (!schema)
throw new Error('Schema is required for OpenAI object generation.');
if (!objectName)
throw new Error('Object name is required for OpenAI object generation.');
const openaiClient = createOpenAI({ apiKey });
try {
// Use the imported generateObject function from 'ai' package
const result = await generateObject({
model: openaiClient(modelId),
schema: schema,
messages: messages,
mode: 'tool',
maxTokens: maxTokens,
temperature: temperature
});
log(
'debug',
`OpenAI generateObject completed successfully for model: ${modelId}`
);
return result.object;
} catch (error) {
log(
'error',
`Error in generateOpenAIObject (Model: ${modelId}, Object: ${objectName}): ${error.message}`,
{ error }
);
throw new Error(
`OpenAI API error during object generation: ${error.message}`
);
}
}
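The module above imports `generateText` and `streamText` from `ai` but routes text and streaming calls through `openaiClient.chat(...)`. For comparison, here is a minimal sketch of the `generateText`/`streamText` pattern (the same pattern `generateOpenAIObject` already uses via `generateObject`), with illustrative function names and the same parameter shape; this is a sketch under those assumptions, not the committed implementation:

```javascript
// Minimal sketch (not the committed implementation): the generateText/streamText
// pattern from the 'ai' package, mirroring the parameter shape used above.
// Function names are illustrative only.
import { createOpenAI } from '@ai-sdk/openai';
import { generateText, streamText } from 'ai';

export async function generateOpenAITextViaSdk({
	apiKey,
	modelId,
	messages,
	maxTokens,
	temperature
}) {
	const openaiClient = createOpenAI({ apiKey });
	// generateText resolves to an object whose `text` field holds the completion
	const { text } = await generateText({
		model: openaiClient(modelId),
		messages,
		maxTokens,
		temperature
	});
	return text;
}

export async function streamOpenAITextViaSdk({
	apiKey,
	modelId,
	messages,
	maxTokens,
	temperature
}) {
	const openaiClient = createOpenAI({ apiKey });
	// Depending on the installed 'ai' version, streamText may return a promise;
	// awaiting covers both cases. Consumers read deltas from result.textStream.
	const result = await streamText({
		model: openaiClient(modelId),
		messages,
		maxTokens,
		temperature
	});
	return result.textStream;
}
```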

View File

@@ -1,6 +1,6 @@
# Task ID: 35
# Title: Integrate Grok3 API for Research Capabilities
# Status: pending
# Status: cancelled
# Dependencies: None
# Priority: medium
# Description: Replace the current Perplexity API integration with Grok3 API for all research-related functionalities while maintaining existing feature parity.

View File

@@ -1336,12 +1336,257 @@ When testing the non-streaming `generateTextService` call in `updateSubtaskById`
### Details:
## 22. Implement `openai.js` Provider Module using Vercel AI SDK [deferred]
## 22. Implement `openai.js` Provider Module using Vercel AI SDK [in-progress]
### Dependencies: None
### Description: Create and implement the `openai.js` module within `src/ai-providers/`. This module should contain functions to interact with the OpenAI API (streaming and non-streaming) using the **Vercel AI SDK**, adhering to the standardized input/output format defined for `ai-services-unified.js`. (Optional, implement if OpenAI models are needed).
### Details:
<info added on 2025-04-27T05:33:49.977Z>
```javascript
// Implementation details for openai.js provider module
import { createOpenAI } from 'ai';
/**
* Generates text using OpenAI models via Vercel AI SDK
*
* @param {Object} params - Configuration parameters
* @param {string} params.apiKey - OpenAI API key
* @param {string} params.modelId - Model ID (e.g., 'gpt-4', 'gpt-3.5-turbo')
* @param {Array} params.messages - Array of message objects with role and content
* @param {number} [params.maxTokens] - Maximum tokens to generate
* @param {number} [params.temperature=0.7] - Sampling temperature (0-1)
* @returns {Promise<string>} The generated text response
*/
export async function generateOpenAIText(params) {
try {
const { apiKey, modelId, messages, maxTokens, temperature = 0.7 } = params;
if (!apiKey) throw new Error('OpenAI API key is required');
if (!modelId) throw new Error('Model ID is required');
if (!messages || !Array.isArray(messages)) throw new Error('Messages array is required');
const openai = createOpenAI({ apiKey });
const response = await openai.chat.completions.create({
model: modelId,
messages,
max_tokens: maxTokens,
temperature,
});
return response.choices[0].message.content;
} catch (error) {
console.error('OpenAI text generation error:', error);
throw new Error(`OpenAI API error: ${error.message}`);
}
}
/**
* Streams text using OpenAI models via Vercel AI SDK
*
* @param {Object} params - Configuration parameters (same as generateOpenAIText)
* @returns {ReadableStream} A stream of text chunks
*/
export async function streamOpenAIText(params) {
try {
const { apiKey, modelId, messages, maxTokens, temperature = 0.7 } = params;
if (!apiKey) throw new Error('OpenAI API key is required');
if (!modelId) throw new Error('Model ID is required');
if (!messages || !Array.isArray(messages)) throw new Error('Messages array is required');
const openai = createOpenAI({ apiKey });
const stream = await openai.chat.completions.create({
model: modelId,
messages,
max_tokens: maxTokens,
temperature,
stream: true,
});
return stream;
} catch (error) {
console.error('OpenAI streaming error:', error);
throw new Error(`OpenAI streaming error: ${error.message}`);
}
}
/**
* Generates a structured object using OpenAI models via Vercel AI SDK
*
* @param {Object} params - Configuration parameters
* @param {string} params.apiKey - OpenAI API key
* @param {string} params.modelId - Model ID (e.g., 'gpt-4', 'gpt-3.5-turbo')
* @param {Array} params.messages - Array of message objects
* @param {Object} params.schema - JSON schema for the response object
* @param {string} params.objectName - Name of the object to generate
* @returns {Promise<Object>} The generated structured object
*/
export async function generateOpenAIObject(params) {
try {
const { apiKey, modelId, messages, schema, objectName } = params;
if (!apiKey) throw new Error('OpenAI API key is required');
if (!modelId) throw new Error('Model ID is required');
if (!messages || !Array.isArray(messages)) throw new Error('Messages array is required');
if (!schema) throw new Error('Schema is required');
if (!objectName) throw new Error('Object name is required');
const openai = createOpenAI({ apiKey });
// Using the Vercel AI SDK's function calling capabilities
const response = await openai.chat.completions.create({
model: modelId,
messages,
functions: [
{
name: objectName,
description: `Generate a ${objectName} object`,
parameters: schema,
},
],
function_call: { name: objectName },
});
const functionCall = response.choices[0].message.function_call;
return JSON.parse(functionCall.arguments);
} catch (error) {
console.error('OpenAI object generation error:', error);
throw new Error(`OpenAI object generation error: ${error.message}`);
}
}
```
</info added on 2025-04-27T05:33:49.977Z>
<info added on 2025-04-27T05:35:03.679Z>
<info added on 2025-04-28T10:15:22.123Z>
```javascript
// Additional implementation notes for openai.js
/**
* Export a provider info object for OpenAI
*/
export const providerInfo = {
id: 'openai',
name: 'OpenAI',
description: 'OpenAI API integration using Vercel AI SDK',
models: {
'gpt-4': {
id: 'gpt-4',
name: 'GPT-4',
contextWindow: 8192,
supportsFunctions: true,
},
'gpt-4-turbo': {
id: 'gpt-4-turbo',
name: 'GPT-4 Turbo',
contextWindow: 128000,
supportsFunctions: true,
},
'gpt-3.5-turbo': {
id: 'gpt-3.5-turbo',
name: 'GPT-3.5 Turbo',
contextWindow: 16385,
supportsFunctions: true,
}
}
};
/**
* Helper function to format error responses consistently
*
* @param {Error} error - The caught error
* @param {string} operation - The operation being performed
* @returns {Error} A formatted error
*/
function formatError(error, operation) {
// Extract OpenAI specific error details if available
const statusCode = error.status || error.statusCode;
const errorType = error.type || error.code || 'unknown_error';
// Create a more detailed error message
const message = `OpenAI ${operation} error (${errorType}): ${error.message}`;
// Create a new error with the formatted message
const formattedError = new Error(message);
// Add additional properties for debugging
formattedError.originalError = error;
formattedError.provider = 'openai';
formattedError.statusCode = statusCode;
formattedError.errorType = errorType;
return formattedError;
}
/**
* Example usage with the unified AI services interface:
*
* // In ai-services-unified.js
* import * as openaiProvider from './ai-providers/openai.js';
*
* export async function generateText(params) {
* switch(params.provider) {
* case 'openai':
* return openaiProvider.generateOpenAIText(params);
* // other providers...
* }
* }
*/
// Note: For proper error handling with the Vercel AI SDK, you may need to:
// 1. Check for rate limiting errors (429)
// 2. Handle token context window exceeded errors
// 3. Implement exponential backoff for retries on 5xx errors
// 4. Parse streaming errors properly from the ReadableStream
```
</info added on 2025-04-28T10:15:22.123Z>
</info added on 2025-04-27T05:35:03.679Z>
<info added on 2025-04-27T05:39:31.942Z>
```javascript
// Correction for openai.js provider module
// IMPORTANT: Use the correct import from Vercel AI SDK
import { createOpenAI, openai } from '@ai-sdk/openai';
// Note: Before using this module, install the required dependency:
// npm install @ai-sdk/openai
// The rest of the implementation remains the same, but uses the correct imports.
// When implementing this module, ensure your package.json includes this dependency.
// For streaming implementations with the Vercel AI SDK, you can also use the
// streamText and experimental streamUI methods:
/**
* Example of using streamText for simpler streaming implementation
*/
export async function streamOpenAITextSimplified(params) {
try {
const { apiKey, modelId, messages, maxTokens, temperature = 0.7 } = params;
if (!apiKey) throw new Error('OpenAI API key is required');
const openaiClient = createOpenAI({ apiKey });
return openaiClient.streamText({
model: modelId,
messages,
temperature,
maxTokens,
});
} catch (error) {
console.error('OpenAI streaming error:', error);
throw new Error(`OpenAI streaming error: ${error.message}`);
}
}
```
</info added on 2025-04-27T05:39:31.942Z>
## 23. Implement Conditional Provider Logic in `ai-services-unified.js` [done]
### Dependencies: 61.20,61.21,61.22,61.24,61.25,61.26,61.27,61.28,61.29,61.30,61.34
### Description: Implement logic within the functions of `ai-services-unified.js` (e.g., `generateTextService`, `generateObjectService`, `streamChatService`) to dynamically select and call the appropriate provider module (`anthropic.js`, `perplexity.js`, etc.) based on configuration (e.g., environment variables like `AI_PROVIDER` and `AI_MODEL` from `process.env` or `session.env`).
@@ -1425,7 +1670,7 @@ function checkProviderCapability(provider, capability) {
```
</info added on 2025-04-20T03:52:13.065Z>
## 24. Implement `google.js` Provider Module using Vercel AI SDK [pending]
## 24. Implement `google.js` Provider Module using Vercel AI SDK [done]
### Dependencies: None
### Description: Create and implement the `google.js` module within `src/ai-providers/`. This module should contain functions to interact with Google AI models (e.g., Gemini) using the **Vercel AI SDK (`@ai-sdk/google`)**, adhering to the standardized input/output format defined for `ai-services-unified.js`.
### Details:

tasks/task_070.txt (new file, 11 lines)
View File

@@ -0,0 +1,11 @@
# Task ID: 70
# Title: Implement 'diagram' command for Mermaid diagram generation
# Status: pending
# Dependencies: None
# Priority: medium
# Description: Develop a CLI command named 'diagram' that generates Mermaid diagrams to visualize task dependencies and workflows, with options to target specific tasks or generate comprehensive diagrams for all tasks.
# Details:
The task involves implementing a new command that accepts an optional '--id' parameter: if provided, the command generates a diagram illustrating the chosen task and its dependencies; if omitted, it produces a diagram that includes all tasks. The diagrams should use color coding to reflect task status and arrows to denote dependencies. In addition to CLI rendering, the command should offer an option to save the output as a Markdown (.md) file. Consider integrating with the existing task management system to pull task details and status. Pay attention to formatting consistency and error handling for invalid or missing task IDs. Comments should be added to the code to improve maintainability, and unit tests should cover edge cases such as cyclic dependencies, missing tasks, and invalid input formats.
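A rough sketch of how the command might turn loaded tasks into Mermaid syntax (all names below are illustrative, not existing code):

```javascript
// Hypothetical helper: build a Mermaid flowchart from task objects.
// Assumed task shape: { id, title, status, dependencies: [] }.
function buildMermaidDiagram(tasks, targetId = null) {
	let selected = tasks;
	if (targetId !== null) {
		const target = tasks.find((t) => t.id === targetId);
		if (!target) throw new Error(`Task ${targetId} not found`);
		// The chosen task plus the tasks it directly depends on
		selected = tasks.filter(
			(t) => t.id === targetId || target.dependencies.includes(t.id)
		);
	}
	const lines = ['graph TD'];
	for (const task of selected) {
		// Status becomes a Mermaid class so nodes can be color coded
		const statusClass = String(task.status).replace(/[^A-Za-z0-9]/g, '_');
		lines.push(`  T${task.id}["${task.id}: ${task.title}"]:::${statusClass}`);
		for (const dep of task.dependencies) {
			lines.push(`  T${dep} --> T${task.id}`); // arrow denotes a dependency
		}
	}
	return lines.join('\n');
}
```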
# Test Strategy:
Verify the command functionality by testing with both specific task IDs and general invocation: 1) Run the command with a valid '--id' and ensure the resulting diagram accurately depicts the specified task's dependencies with correct color codings for statuses. 2) Execute the command without '--id' to ensure a complete workflow diagram is generated for all tasks. 3) Check that arrows correctly represent dependency relationships. 4) Validate the Markdown (.md) file export option by confirming the file format and content after saving. 5) Test error responses for non-existent task IDs and malformed inputs.

tasks/task_071.txt (new file, 23 lines)
View File

@@ -0,0 +1,23 @@
# Task ID: 71
# Title: Add Model-Specific maxTokens Override Configuration
# Status: pending
# Dependencies: None
# Priority: high
# Description: Implement functionality to allow specifying a maximum token limit for individual AI models within .taskmasterconfig, overriding the role-based maxTokens if the model-specific limit is lower.
# Details:
1. **Modify `.taskmasterconfig` Structure:** Add a new top-level section `modelOverrides` (e.g., `"modelOverrides": { "o3-mini": { "maxTokens": 100000 } }`).
2. **Update `config-manager.js`:**
- Modify config loading to read the new `modelOverrides` section.
- Update `getParametersForRole(role)` logic: Fetch role defaults (roleMaxTokens, temperature). Get the modelId for the role. Look up `modelOverrides[modelId].maxTokens` (modelSpecificMaxTokens). Calculate `effectiveMaxTokens = Math.min(roleMaxTokens, modelSpecificMaxTokens ?? Infinity)`. Return `{ maxTokens: effectiveMaxTokens, temperature }` (see the sketch after this list).
3. **Update Documentation:** Add an example of `modelOverrides` to `.taskmasterconfig.example` or relevant documentation.
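A rough sketch of the `getParametersForRole` change described in step 2, assuming the loaded config exposes the new `modelOverrides` section (names follow this task description, not necessarily the final implementation):

```javascript
// Sketch only: cap the role's maxTokens with an optional per-model override.
function getParametersForRole(role, config) {
	const roleConfig = config.models[role]; // { provider, modelId, maxTokens, temperature }
	const modelSpecificMaxTokens =
		config.modelOverrides?.[roleConfig.modelId]?.maxTokens;
	// Use the lower of the role default and the model-specific limit, if present
	const effectiveMaxTokens = Math.min(
		roleConfig.maxTokens,
		modelSpecificMaxTokens ?? Infinity
	);
	return { maxTokens: effectiveMaxTokens, temperature: roleConfig.temperature };
}
```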
# Test Strategy:
1. **Unit Tests (`config-manager.js`):**
- Verify `getParametersForRole` returns role defaults when no override exists.
- Verify `getParametersForRole` returns the lower model-specific limit when an override exists and is lower.
- Verify `getParametersForRole` returns the role limit when an override exists but is higher.
- Verify handling of missing `modelOverrides` section.
2. **Integration Tests (`ai-services-unified.js`):**
- Call an AI service (e.g., `generateTextService`) with a config having a model override.
- Mock the underlying provider function.
- Assert that the `maxTokens` value passed to the mocked provider function matches the expected (potentially overridden) minimum value.

File diff suppressed because one or more lines are too long