refactor: Standardize configuration and environment variable access
This commit centralizes configuration and environment variable access across various modules by consistently utilizing getters from scripts/modules/config-manager.js. This replaces direct access to process.env and the global CONFIG object, leading to improved consistency, maintainability, testability, and better handling of session-specific configurations within the MCP context.
Key changes include:
- Centralized Getters: Replaced numerous instances of process.env.* and CONFIG.* with corresponding getter functions (e.g., getLogLevel, getMainModelId, getResearchMaxTokens, getMainTemperature, isApiKeySet, getDebugFlag, getDefaultSubtasks).
- Session Awareness: Ensured that the session object is passed to config getters where necessary, particularly within AI service calls (ai-services.js, add-task.js) and error handling (ai-services.js), allowing for session-specific environment overrides.
- API Key Checks: Standardized API key availability checks using isApiKeySet() instead of directly checking process.env.* (e.g., for Perplexity in commands.js and ai-services.js).
- Client Instantiation Cleanup: Removed now-redundant/obsolete local client instantiation functions (getAnthropicClient, getPerplexityClient) from ai-services.js and the global Anthropic client initialization from dependency-manager.js. Client creation should now rely on the config manager and factory patterns.
- Consistent Debug Flag Usage: Standardized calls to getDebugFlag() in commands.js, removing potentially unnecessary null arguments.
- Accurate Progress Calculation: Updated AI stream progress reporting (ai-services.js, add-task.js) to use getMainMaxTokens(session) for more accurate calculations.
- Minor Cleanup: Removed unused import from scripts/modules/commands.js.
Specific module updates:
- `scripts/modules/utils.js`:
- Uses getLogLevel() instead of process.env.LOG_LEVEL.
- `scripts/modules/ai-services.js`:
- Replaced direct env/config access for model IDs, tokens, temperature, API keys, and default subtasks with appropriate getters.
- Passed session to handleClaudeError.
- Removed local getPerplexityClient and getAnthropicClient functions.
- Updated progress calculations to use getMainMaxTokens(session).
- `scripts/modules/commands.js`:
- Uses isApiKeySet('perplexity') for API key checks.
- Uses getDebugFlag() consistently for debug checks.
- Removed unused import.
- `scripts/modules/dependency-manager.js`:
- Removed global Anthropic client initialization.
- `add-task.js` (MCP direct function):
- Uses config getters (getResearch..., getMain...) for Perplexity and Claude API call parameters, preserving customEnv override logic.
This refactoring also resolves a potential SyntaxError: Identifier 'getPerplexityClient' has already been declared by removing the duplicated/obsolete function definition previously present in ai-services.js.
This commit is contained in:
@@ -6,6 +6,16 @@ import {
 	parseSubtasksFromText
 } from '../ai-services.js';
 
+// Import necessary config getters
+import {
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature,
+	getResearchModelId,
+	getResearchMaxTokens,
+	getResearchTemperature
+} from '../config-manager.js';
+
 /**
  * Call AI to generate subtasks based on a prompt
  * @param {string} prompt - The prompt to send to the AI
@@ -26,9 +36,9 @@ async function getSubtasksFromAI(
 
 	// Prepare API parameters
 	const apiParams = {
-		model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
-		max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
-		temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
+		model: getMainModelId(session),
+		max_tokens: getMainMaxTokens(session),
+		temperature: getMainTemperature(session),
 		system:
 			'You are an AI assistant helping with task breakdown for software development.',
 		messages: [{ role: 'user', content: prompt }]
@@ -46,10 +56,7 @@ async function getSubtasksFromAI(
 		mcpLog.info('Using Perplexity AI for research-backed subtasks');
 	}
 
-	const perplexityModel =
-		process.env.PERPLEXITY_MODEL ||
-		session?.env?.PERPLEXITY_MODEL ||
-		'sonar-pro';
+	const perplexityModel = getResearchModelId(session);
 	const result = await perplexity.chat.completions.create({
 		model: perplexityModel,
 		messages: [
@@ -60,8 +67,8 @@ async function getSubtasksFromAI(
 		},
 		{ role: 'user', content: prompt }
 	],
-		temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
-		max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens
+		temperature: getResearchTemperature(session),
+		max_tokens: getResearchMaxTokens(session)
 	});
 
 	responseText = result.choices[0].message.content;
Reference in New Issue
Block a user