refactor: Standardize configuration and environment variable access
This commit centralizes configuration and environment variable access across modules by consistently using getters from scripts/modules/config-manager.js in place of direct access to process.env and the global CONFIG object. This improves consistency, maintainability, and testability, and allows session-specific configuration to be handled correctly within the MCP context.
Key changes include:
- Centralized Getters: Replaced numerous instances of process.env.* and CONFIG.* with corresponding getter functions (e.g., getLogLevel, getMainModelId, getResearchMaxTokens, getMainTemperature, isApiKeySet, getDebugFlag, getDefaultSubtasks); see the first sketch after this list.
- Session Awareness: Ensured that the session object is passed to config getters where necessary, particularly within AI service calls (ai-services.js, add-task.js) and error handling (ai-services.js), allowing for session-specific environment overrides.
- API Key Checks: Standardized API key availability checks using isApiKeySet() instead of directly checking process.env.* (e.g., for Perplexity in commands.js and ai-services.js); see the second sketch below.
- Client Instantiation Cleanup: Removed now-redundant/obsolete local client instantiation functions (getAnthropicClient, getPerplexityClient) from ai-services.js and the global Anthropic client initialization from dependency-manager.js. Client creation should now rely on the config manager and factory patterns.
- Consistent Debug Flag Usage: Standardized calls to getDebugFlag() in commands.js, removing potentially unnecessary null arguments.
- Accurate Progress Calculation: Updated AI stream progress reporting (ai-services.js, add-task.js) to use getMainMaxTokens(session), so the reported percentage reflects the actual configured token budget; see the third sketch below.
- Minor Cleanup: Removed unused import from scripts/modules/commands.js.
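
To make the pattern concrete, here is a minimal sketch of the before/after shape of a main-model call. The getter names and signatures (getMainModelId(session), getMainMaxTokens(session), getMainTemperature(session)) and the import path come from the diff below; createMainCompletion and its parameters are hypothetical stand-ins for the real call sites in ai-services.js:

```js
import {
	getMainModelId,
	getMainMaxTokens,
	getMainTemperature
} from '../config-manager.js';

// Before (direct access, blind to MCP session overrides):
//   model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
// After: every parameter is resolved through a session-aware getter.
async function createMainCompletion(client, session, systemPrompt, messages) {
	return client.messages.create({
		model: getMainModelId(session),
		max_tokens: getMainMaxTokens(session),
		temperature: getMainTemperature(session),
		system: systemPrompt,
		messages
	});
}
```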
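The same applies to availability checks. A sketch of the standardized key check, assuming isApiKeySet takes a provider name as in the commit text; the warning message and fallback behavior are illustrative:

```js
import { isApiKeySet } from '../config-manager.js';

// Before: if (!process.env.PERPLEXITY_API_KEY) { ... }
// After: a single predicate that lets the config manager decide where
// the key may come from (process.env, session overrides, etc.).
if (!isApiKeySet('perplexity')) {
	console.warn(
		'Perplexity API key is not configured; research features are disabled.'
	);
}
```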
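And the corrected progress computation, which mirrors the hunks in the diff below; using response length as a proxy for tokens consumed is inherited from the existing code:

```js
// Approximate progress as the share of the configured token budget used.
// getMainMaxTokens(session) honors session-level overrides, so the same
// code reports sensible percentages in both CLI and MCP contexts.
const percent = (responseText.length / getMainMaxTokens(session)) * 100;
if (reportProgress) {
	await reportProgress({ progress: percent });
}
if (mcpLog) {
	mcpLog.info(`Progress: ${percent}%`);
}
```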
Specific module updates:
- Logging: uses getLogLevel() instead of process.env.LOG_LEVEL.
- scripts/modules/ai-services.js:
  - Replaced direct env/config access for model IDs, tokens, temperature, API keys, and default subtasks with the appropriate getters.
  - Passed session to handleClaudeError.
  - Removed the local getPerplexityClient and getAnthropicClient functions.
  - Updated progress calculations to use getMainMaxTokens(session).
- scripts/modules/commands.js:
  - Uses isApiKeySet('perplexity') for API key checks.
  - Uses getDebugFlag() consistently for debug checks.
  - Removed an unused import.
- scripts/modules/dependency-manager.js:
  - Removed the global Anthropic client initialization.
- add-task.js:
  - Uses config getters (getResearch..., getMain...) for Perplexity and Claude API call parameters, preserving the customEnv override logic; the getter resolution order is sketched below.
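
The getter implementations themselves are not part of this diff. A plausible sketch of the resolution order they encapsulate, inferred from the inline fallbacks the old call sites used; the precedence, the defaults object, and the default value are assumptions:

```js
// Illustrative defaults standing in for the config manager's internal state.
const defaults = { temperature: 0.7 };

// Hypothetical getter shape (not shown in the diff): session env first,
// then the process environment, then the configured default.
function getMainTemperature(session) {
	const raw =
		session?.env?.TEMPERATURE ??
		process.env.TEMPERATURE ??
		defaults.temperature;
	return parseFloat(raw);
}
```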
This refactoring also resolves a potential "SyntaxError: Identifier 'getPerplexityClient' has already been declared" error by removing the duplicated/obsolete function definition previously present in ai-services.js.
@@ -11,7 +11,15 @@ import {
 	stopLoadingIndicator
 } from '../ui.js';
 
-import { getDebugFlag } from '../config-manager.js';
+import {
+	getDebugFlag,
+	getResearchModelId,
+	getResearchTemperature,
+	getResearchMaxTokens,
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature
+} from '../config-manager.js';
 import generateTaskFiles from './generate-task-files.js';
 
 /**
@@ -204,13 +212,9 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
 	}
 
 	if (modelType === 'perplexity') {
-		// Call Perplexity AI using proper format
-		const perplexityModel =
-			process.env.PERPLEXITY_MODEL ||
-			session?.env?.PERPLEXITY_MODEL ||
-			'sonar-pro';
+		// Call Perplexity AI using proper format and getters
 		const result = await client.chat.completions.create({
-			model: perplexityModel,
+			model: getResearchModelId(session),
 			messages: [
 				{
 					role: 'system',
@@ -218,23 +222,11 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
 				},
 				{
 					role: 'user',
-					content: `Here are the tasks to update:
-${taskData}
-
-Please update these tasks based on the following new context:
-${prompt}
-
-IMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.
-
-Return only the updated tasks as a valid JSON array.`
+					content: `Here are the tasks to update:\n${taskData}\n\nPlease update these tasks based on the following new context:\n${prompt}\n\nIMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.\n\nReturn only the updated tasks as a valid JSON array.`
 				}
 			],
-			temperature: parseFloat(
-				process.env.TEMPERATURE ||
-					session?.env?.TEMPERATURE ||
-					CONFIG.temperature
-			),
-			max_tokens: 8700
+			temperature: getResearchTemperature(session),
+			max_tokens: getResearchMaxTokens(session)
 		});
 
 		const responseText = result.choices[0].message.content;
@@ -270,11 +262,11 @@ Return only the updated tasks as a valid JSON array.`
 		}, 500);
 	}
 
-	// Use streaming API call
+	// Use streaming API call with getters
 	const stream = await client.messages.create({
-		model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
-		max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
-		temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
+		model: getMainModelId(session),
+		max_tokens: getMainMaxTokens(session),
+		temperature: getMainTemperature(session),
 		system: systemPrompt,
 		messages: [
 			{
@@ -300,12 +292,13 @@ Return only the updated task as a valid JSON object.`
 		}
 		if (reportProgress) {
 			await reportProgress({
-				progress: (responseText.length / CONFIG.maxTokens) * 100
+				progress:
+					(responseText.length / getMainMaxTokens(session)) * 100
 			});
 		}
 		if (mcpLog) {
 			mcpLog.info(
-				`Progress: ${(responseText.length / CONFIG.maxTokens) * 100}%`
+				`Progress: ${(responseText.length / getMainMaxTokens(session)) * 100}%`
 			);
 		}
 	}