refactor: Standardize configuration and environment variable access
This commit centralizes configuration and environment variable access across modules by consistently using getters from scripts/modules/config-manager.js. Replacing direct access to process.env and the global CONFIG object improves consistency, maintainability, and testability, and allows session-specific configuration to be handled correctly within the MCP context.
Key changes include:
- Centralized Getters: Replaced numerous instances of process.env.* and CONFIG.* with the corresponding getter functions (e.g., getLogLevel, getMainModelId, getResearchMaxTokens, getMainTemperature, isApiKeySet, getDebugFlag, getDefaultSubtasks); a sketch of the getter pattern follows this list.
- Session Awareness: Ensured that the session object is passed to config getters where necessary, particularly within AI service calls (ai-services.js, add-task.js) and error handling (ai-services.js), allowing for session-specific environment overrides.
- API Key Checks: Standardized API key availability checks using isApiKeySet() instead of directly checking process.env.* (e.g., for Perplexity in commands.js and ai-services.js).
- Client Instantiation Cleanup: Removed the now-obsolete local client instantiation functions (getAnthropicClient, getPerplexityClient) from ai-services.js and the global Anthropic client initialization from dependency-manager.js. Client creation now relies on the config manager and factory patterns.
- Consistent Debug Flag Usage: Standardized calls to getDebugFlag() in commands.js, removing superfluous null arguments.
- Accurate Progress Calculation: Updated AI stream progress reporting (ai-services.js, add-task.js) to use getMainMaxTokens(session) for more accurate calculations.
- Minor Cleanup: Removed unused import from scripts/modules/commands.js.
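
For illustration, the getter pattern works roughly as follows. This is a minimal sketch, not the actual config-manager.js implementation; the resolveEnv helper and the default value shown here are assumptions:

	// Hypothetical default; the real defaults live in the config manager.
	const DEFAULTS = { mainMaxTokens: 4000 };

	// Resolve a key from the MCP session's env overrides first, then from the
	// process environment (illustrative helper, not a real export).
	function resolveEnv(key, session) {
		return session?.env?.[key] ?? process.env[key];
	}

	// Session-aware getter: env overrides win, config defaults are the fallback.
	export function getMainMaxTokens(session) {
		const raw = resolveEnv('MAX_TOKENS', session);
		return raw !== undefined ? parseInt(raw, 10) : DEFAULTS.mainMaxTokens;
	}

	// Availability check used instead of reading process.env.PERPLEXITY_API_KEY directly.
	export function isApiKeySet(provider, session) {
		const value = resolveEnv(`${provider.toUpperCase()}_API_KEY`, session);
		return typeof value === 'string' && value.trim() !== '';
	}

Callers then write isApiKeySet('perplexity', session) instead of checking process.env.PERPLEXITY_API_KEY, and getMainMaxTokens(session) instead of parseInt(process.env.MAX_TOKENS || CONFIG.maxTokens).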
Specific module updates:
- utils.js:
- Uses getLogLevel() instead of process.env.LOG_LEVEL.
- ai-services.js:
- Replaced direct env/config access for model IDs, tokens, temperature, API keys, and default subtasks with appropriate getters.
- Passed session to handleClaudeError.
- Removed local getPerplexityClient and getAnthropicClient functions.
- Updated progress calculations to use getMainMaxTokens(session).
- commands.js:
- Uses isApiKeySet('perplexity') for API key checks.
- Uses getDebugFlag() consistently for debug checks.
- Removed unused import.
- dependency-manager.js:
- Removed global Anthropic client initialization.
- add-task.js:
- Uses config getters (getResearch..., getMain...) for Perplexity and Claude API call parameters, preserving the customEnv override logic (see the before/after sketch below).
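
For reference, the add-task.js call-site change has this shape (taken from the diff below):

Before:

	const apiParams = {
		model:
			session?.env?.ANTHROPIC_MODEL ||
			CONFIG.model ||
			customEnv?.ANTHROPIC_MODEL,
		// ... max_tokens and temperature follow the same fallback chain
	};

After:

	const apiParams = {
		model: customEnv?.ANTHROPIC_MODEL || getMainModelId(session),
		max_tokens: customEnv?.MAX_TOKENS || getMainMaxTokens(session),
		temperature: customEnv?.TEMPERATURE || getMainTemperature(session),
		system: systemPrompt,
		messages: [{ role: 'user', content: userPrompt }]
	};

An explicit customEnv value now takes precedence over the session-aware getter, which itself resolves session overrides before falling back to configured defaults.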
This refactoring also resolves a potential SyntaxError: Identifier 'getPerplexityClient' has already been declared by removing the duplicated/obsolete function definition previously present in ai-services.js.
@@ -11,7 +11,15 @@ import {
 } from '../ui.js';
 import { log, readJSON, writeJSON, truncate } from '../utils.js';
 import { _handleAnthropicStream } from '../ai-services.js';
-import { getDefaultPriority } from '../config-manager.js';
+import {
+	getDefaultPriority,
+	getResearchModelId,
+	getResearchTemperature,
+	getResearchMaxTokens,
+	getMainModelId,
+	getMainTemperature,
+	getMainMaxTokens
+} from '../config-manager.js';
 
 /**
  * Add a new task using AI
@@ -183,46 +191,26 @@ async function addTask(
 
 		if (modelType === 'perplexity') {
 			// Use Perplexity AI
-			const perplexityModel =
-				process.env.PERPLEXITY_MODEL ||
-				session?.env?.PERPLEXITY_MODEL ||
-				'sonar-pro';
 			const response = await client.chat.completions.create({
-				model: perplexityModel,
+				model: getResearchModelId(session),
 				messages: [
 					{ role: 'system', content: systemPrompt },
 					{ role: 'user', content: userPrompt }
 				],
-				temperature: parseFloat(
-					process.env.TEMPERATURE ||
-						session?.env?.TEMPERATURE ||
-						CONFIG.temperature
-				),
-				max_tokens: parseInt(
-					process.env.MAX_TOKENS ||
-						session?.env?.MAX_TOKENS ||
-						CONFIG.maxTokens
-				)
+				temperature: getResearchTemperature(session),
+				max_tokens: getResearchMaxTokens(session)
 			});
 
 			const responseText = response.choices[0].message.content;
 			aiGeneratedTaskData = parseTaskJsonResponse(responseText);
 		} else {
 			// Use Claude (default)
-			// Prepare API parameters
+			// Prepare API parameters using getters, preserving customEnv override
 			const apiParams = {
-				model:
-					session?.env?.ANTHROPIC_MODEL ||
-					CONFIG.model ||
-					customEnv?.ANTHROPIC_MODEL,
-				max_tokens:
-					session?.env?.MAX_TOKENS ||
-					CONFIG.maxTokens ||
-					customEnv?.MAX_TOKENS,
-				temperature:
-					session?.env?.TEMPERATURE ||
-					CONFIG.temperature ||
-					customEnv?.TEMPERATURE,
+				model: customEnv?.ANTHROPIC_MODEL || getMainModelId(session),
+				max_tokens: customEnv?.MAX_TOKENS || getMainMaxTokens(session),
+				temperature: customEnv?.TEMPERATURE || getMainTemperature(session),
 				system: systemPrompt,
 				messages: [{ role: 'user', content: userPrompt }]
 			};
 
@@ -8,7 +8,17 @@ import { startLoadingIndicator, stopLoadingIndicator } from '../ui.js';
 
 import { generateComplexityAnalysisPrompt } from '../ai-services.js';
 
-import { getDebugFlag } from '../config-manager.js';
+import {
+	getDebugFlag,
+	getProjectName,
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature,
+	getResearchModelId,
+	getResearchMaxTokens,
+	getResearchTemperature,
+	getDefaultSubtasks
+} from '../config-manager.js';
 
 /**
  * Analyzes task complexity and generates expansion recommendations
@@ -127,6 +137,83 @@ async function analyzeTaskComplexity(
 		}
 	}
 
+	// If after filtering, there are no tasks left to analyze, exit early.
+	if (tasksData.tasks.length === 0) {
+		const emptyReport = {
+			meta: {
+				generatedAt: new Date().toISOString(),
+				tasksAnalyzed: tasksData.tasks.length,
+				thresholdScore: thresholdScore,
+				projectName: getProjectName(session),
+				usedResearch: useResearch
+			},
+			complexityAnalysis: []
+		};
+		// Write the report to file
+		reportLog(`Writing complexity report to ${outputPath}...`, 'info');
+		writeJSON(outputPath, emptyReport);
+
+		reportLog(
+			`Task complexity analysis complete. Report written to ${outputPath}`,
+			'success'
+		);
+
+		// Only show UI elements for text output (CLI)
+		if (outputFormat === 'text') {
+			console.log(
+				chalk.green(
+					`Task complexity analysis complete. Report written to ${outputPath}`
+				)
+			);
+
+			// Display a summary of findings
+			const highComplexity = emptyReport.complexityAnalysis.filter(
+				(t) => t.complexityScore >= 8
+			).length;
+			const mediumComplexity = emptyReport.complexityAnalysis.filter(
+				(t) => t.complexityScore >= 5 && t.complexityScore < 8
+			).length;
+			const lowComplexity = emptyReport.complexityAnalysis.filter(
+				(t) => t.complexityScore < 5
+			).length;
+			const totalAnalyzed = emptyReport.complexityAnalysis.length;
+
+			console.log('\nComplexity Analysis Summary:');
+			console.log('----------------------------');
+			console.log(`Tasks in input file: ${tasksData.tasks.length}`);
+			console.log(`Tasks successfully analyzed: ${totalAnalyzed}`);
+			console.log(`High complexity tasks: ${highComplexity}`);
+			console.log(`Medium complexity tasks: ${mediumComplexity}`);
+			console.log(`Low complexity tasks: ${lowComplexity}`);
+			console.log(
+				`Sum verification: ${highComplexity + mediumComplexity + lowComplexity} (should equal ${totalAnalyzed})`
+			);
+			console.log(`Research-backed analysis: ${useResearch ? 'Yes' : 'No'}`);
+			console.log(
+				`\nSee ${outputPath} for the full report and expansion commands.`
+			);
+
+			// Show next steps suggestions
+			console.log(
+				boxen(
+					chalk.white.bold('Suggested Next Steps:') +
+						'\n\n' +
+						`${chalk.cyan('1.')} Run ${chalk.yellow('task-master complexity-report')} to review detailed findings\n` +
+						`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down complex tasks\n` +
+						`${chalk.cyan('3.')} Run ${chalk.yellow('task-master expand --all')} to expand all pending tasks based on complexity`,
+					{
+						padding: 1,
+						borderColor: 'cyan',
+						borderStyle: 'round',
+						margin: { top: 1 }
+					}
+				)
+			);
+		}
+
+		return emptyReport;
+	}
+
 	// Prepare the prompt for the LLM
 	const prompt = generateComplexityAnalysisPrompt(tasksData);
 
@@ -183,11 +270,9 @@ Your response must be a clean JSON array only, following exactly this format:
 
 DO NOT include any text before or after the JSON array. No explanations, no markdown formatting.`;
 
+				// Keep the direct AI call for now, use config getters for parameters
 				const result = await perplexity.chat.completions.create({
-					model:
-						process.env.PERPLEXITY_MODEL ||
-						session?.env?.PERPLEXITY_MODEL ||
-						'sonar-pro',
+					model: getResearchModelId(session),
 					messages: [
 						{
 							role: 'system',
@@ -199,8 +284,8 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 							content: researchPrompt
 						}
 					],
-					temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
-					max_tokens: 8700,
+					temperature: getResearchTemperature(session),
+					max_tokens: getResearchMaxTokens(session),
 					web_search_options: {
 						search_context_size: 'high'
 					},
@@ -236,6 +321,12 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 					console.log(chalk.gray('Response first 200 chars:'));
 					console.log(chalk.gray(fullResponse.substring(0, 200)));
 				}
+
+				if (getDebugFlag(session)) {
+					console.debug(
+						chalk.gray(`Raw response: ${fullResponse.substring(0, 500)}...`)
+					);
+				}
 			} catch (perplexityError) {
 				reportLog(
 					`Falling back to Claude for complexity analysis: ${perplexityError.message}`,
@@ -287,12 +378,11 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 				);
 			}
 
-			// Call the LLM API with streaming
+			// Keep the direct AI call for now, use config getters for parameters
 			const stream = await anthropic.messages.create({
-				max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
-				model:
-					modelOverride || CONFIG.model || session?.env?.ANTHROPIC_MODEL,
-				temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
+				max_tokens: getMainMaxTokens(session),
+				model: modelOverride || getMainModelId(session),
+				temperature: getMainTemperature(session),
 				messages: [{ role: 'user', content: prompt }],
 				system:
 					'You are an expert software architect and project manager analyzing task complexity. Respond only with valid JSON.',
@@ -318,12 +408,13 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 				}
 				if (reportProgress) {
 					await reportProgress({
-						progress: (fullResponse.length / CONFIG.maxTokens) * 100
+						progress:
+							(fullResponse.length / getMainMaxTokens(session)) * 100
 					});
 				}
 				if (mcpLog) {
 					mcpLog.info(
-						`Progress: ${(fullResponse.length / CONFIG.maxTokens) * 100}%`
+						`Progress: ${(fullResponse.length / getMainMaxTokens(session)) * 100}%`
 					);
 				}
 			}
@@ -797,7 +888,7 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 				generatedAt: new Date().toISOString(),
 				tasksAnalyzed: tasksData.tasks.length,
 				thresholdScore: thresholdScore,
-				projectName: tasksData.meta?.projectName || 'Your Project Name',
+				projectName: getProjectName(session),
 				usedResearch: useResearch
 			},
 			complexityAnalysis: complexityAnalysis
@@ -865,6 +956,12 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 					}
 				)
 			);
+
+			if (getDebugFlag(session)) {
+				console.debug(
+					chalk.gray(`Raw response: ${fullResponse.substring(0, 500)}...`)
+				);
+			}
 		}
 
 		return finalReport;
@@ -885,8 +982,7 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 		console.error(
 			chalk.red(`Error parsing complexity analysis: ${error.message}`)
 		);
-		if (getDebugFlag()) {
-			// Use getter
+		if (getDebugFlag(session)) {
 			console.debug(
 				chalk.gray(`Raw response: ${fullResponse.substring(0, 500)}...`)
 			);
@@ -931,8 +1027,7 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 		);
 	}
 
-	if (getDebugFlag()) {
-		// Use getter
+	if (getDebugFlag(session)) {
 		console.error(error);
 	}
 
@@ -12,7 +12,12 @@ import {
 	parseSubtasksFromText
 } from '../ai-services.js';
 
-import { getDefaultSubtasks } from '../config-manager.js';
+import {
+	getDefaultSubtasks,
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature
+} from '../config-manager.js';
 import generateTaskFiles from './generate-task-files.js';
 
 /**
@@ -207,11 +212,11 @@ Return exactly ${subtaskCount} subtasks with the following JSON structure:
 
 Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use an empty array if there are no dependencies.`;
 
-	// Prepare API parameters
+	// Prepare API parameters using getters
 	const apiParams = {
-		model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
-		max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
-		temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
+		model: getMainModelId(session),
+		max_tokens: getMainMaxTokens(session),
+		temperature: getMainTemperature(session),
 		system: systemPrompt,
 		messages: [{ role: 'user', content: userPrompt }]
 	};
@@ -6,6 +6,16 @@ import {
 	parseSubtasksFromText
 } from '../ai-services.js';
 
+// Import necessary config getters
+import {
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature,
+	getResearchModelId,
+	getResearchMaxTokens,
+	getResearchTemperature
+} from '../config-manager.js';
+
 /**
  * Call AI to generate subtasks based on a prompt
  * @param {string} prompt - The prompt to send to the AI
@@ -26,9 +36,9 @@ async function getSubtasksFromAI(
 
 	// Prepare API parameters
 	const apiParams = {
-		model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
-		max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
-		temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
+		model: getMainModelId(session),
+		max_tokens: getMainMaxTokens(session),
+		temperature: getMainTemperature(session),
 		system:
 			'You are an AI assistant helping with task breakdown for software development.',
 		messages: [{ role: 'user', content: prompt }]
@@ -46,10 +56,7 @@ async function getSubtasksFromAI(
 			mcpLog.info('Using Perplexity AI for research-backed subtasks');
 		}
 
-		const perplexityModel =
-			process.env.PERPLEXITY_MODEL ||
-			session?.env?.PERPLEXITY_MODEL ||
-			'sonar-pro';
+		const perplexityModel = getResearchModelId(session);
 		const result = await perplexity.chat.completions.create({
 			model: perplexityModel,
 			messages: [
@@ -60,8 +67,8 @@ async function getSubtasksFromAI(
 				},
 				{ role: 'user', content: prompt }
 			],
-			temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
-			max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens
+			temperature: getResearchTemperature(session),
+			max_tokens: getResearchMaxTokens(session)
 		});
 
 		responseText = result.choices[0].message.content;
@@ -97,7 +97,8 @@ async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {
 		if (!options?.mcpLog) {
 			console.error(chalk.red(`Error: ${error.message}`));
 
-			if (getDebugFlag()) {
+			// Pass session to getDebugFlag
+			if (getDebugFlag(options?.session)) {
 				// Use getter
 				console.error(error);
 			}
@@ -11,7 +11,15 @@ import {
 } from '../ui.js';
 import { log, readJSON, writeJSON, truncate, isSilentMode } from '../utils.js';
 import { getAvailableAIModel } from '../ai-services.js';
-import { getDebugFlag } from '../config-manager.js';
+import {
+	getDebugFlag,
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature,
+	getResearchModelId,
+	getResearchMaxTokens,
+	getResearchTemperature
+} from '../config-manager.js';
 import generateTaskFiles from './generate-task-files.js';
 
 /**
@@ -231,26 +239,15 @@ Provide concrete examples, code snippets, or implementation details when relevan
 
 		if (modelType === 'perplexity') {
 			// Construct Perplexity payload
-			const perplexityModel =
-				process.env.PERPLEXITY_MODEL ||
-				session?.env?.PERPLEXITY_MODEL ||
-				'sonar-pro';
+			const perplexityModel = getResearchModelId(session);
 			const response = await client.chat.completions.create({
 				model: perplexityModel,
 				messages: [
 					{ role: 'system', content: systemPrompt },
 					{ role: 'user', content: userMessageContent }
 				],
-				temperature: parseFloat(
-					process.env.TEMPERATURE ||
-						session?.env?.TEMPERATURE ||
-						CONFIG.temperature
-				),
-				max_tokens: parseInt(
-					process.env.MAX_TOKENS ||
-						session?.env?.MAX_TOKENS ||
-						CONFIG.maxTokens
-				)
+				temperature: getResearchTemperature(session),
+				max_tokens: getResearchMaxTokens(session)
 			});
 			additionalInformation = response.choices[0].message.content.trim();
 		} else {
@@ -272,11 +269,11 @@ Provide concrete examples, code snippets, or implementation details when relevan
 				}, 500);
 			}
 
-			// Construct Claude payload
+			// Construct Claude payload using config getters
 			const stream = await client.messages.create({
-				model: CONFIG.model,
-				max_tokens: CONFIG.maxTokens,
-				temperature: CONFIG.temperature,
+				model: getMainModelId(session),
+				max_tokens: getMainMaxTokens(session),
+				temperature: getMainTemperature(session),
 				system: systemPrompt,
 				messages: [{ role: 'user', content: userMessageContent }],
 				stream: true
@@ -288,12 +285,13 @@ Provide concrete examples, code snippets, or implementation details when relevan
 				}
 				if (reportProgress) {
 					await reportProgress({
-						progress: (responseText.length / CONFIG.maxTokens) * 100
+						progress:
+							(responseText.length / getMainMaxTokens(session)) * 100
 					});
 				}
 				if (mcpLog) {
 					mcpLog.info(
-						`Progress: ${(responseText.length / CONFIG.maxTokens) * 100}%`
+						`Progress: ${(responseText.length / getMainMaxTokens(session)) * 100}%`
 					);
 				}
 			}
@@ -540,7 +538,7 @@ Provide concrete examples, code snippets, or implementation details when relevan
 				' 1. Set your Perplexity API key: export PERPLEXITY_API_KEY=your_api_key_here'
 			);
 			console.log(
-				' 2. Or run without the research flag: task-master update-subtask --id=<id> --prompt=\"...\"'
+				' 2. Or run without the research flag: task-master update-subtask --id=<id> --prompt="..."'
 			);
 		} else if (error.message?.includes('overloaded')) {
 			// Catch final overload error
@@ -568,7 +566,7 @@ Provide concrete examples, code snippets, or implementation details when relevan
 			);
 		}
 
-		if (getDebugFlag()) {
+		if (getDebugFlag(session)) {
 			// Use getter
 			console.error(error);
 		}
@@ -13,7 +13,16 @@ import {
 } from '../ui.js';
 
 import { _handleAnthropicStream } from '../ai-services.js';
-import { getDebugFlag } from '../config-manager.js';
+import {
+	getDebugFlag,
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature,
+	getResearchModelId,
+	getResearchMaxTokens,
+	getResearchTemperature,
+	isApiKeySet
+} from '../config-manager.js';
 import generateTaskFiles from './generate-task-files.js';
 
 /**
@@ -64,15 +73,10 @@ async function updateTaskById(
 		);
 	}
 
-	// Validate research flag
-	if (
-		useResearch &&
-		(!perplexity ||
-			!process.env.PERPLEXITY_API_KEY ||
-			session?.env?.PERPLEXITY_API_KEY)
-	) {
+	// Validate research flag and API key
+	if (useResearch && !isApiKeySet('perplexity', session)) {
 		report(
-			'Perplexity AI is not available. Falling back to Claude AI.',
+			'Perplexity AI research requested but API key is not set. Falling back to main AI.',
 			'warn'
 		);
 
@@ -274,7 +278,7 @@ The changes described in the prompt should be thoughtfully applied to make the t
 				session?.env?.PERPLEXITY_MODEL ||
 				'sonar-pro';
 			const result = await client.chat.completions.create({
-				model: perplexityModel,
+				model: getResearchModelId(session),
 				messages: [
 					{
 						role: 'system',
@@ -293,12 +297,8 @@ IMPORTANT: In the task JSON above, any subtasks with "status": "done" or "status
 Return only the updated task as a valid JSON object.`
 					}
 				],
-				temperature: parseFloat(
-					process.env.TEMPERATURE ||
-						session?.env?.TEMPERATURE ||
-						CONFIG.temperature
-				),
-				max_tokens: 8700
+				temperature: getResearchTemperature(session),
+				max_tokens: getResearchMaxTokens(session)
 			});
 
 			const responseText = result.choices[0].message.content;
@@ -343,9 +343,9 @@ Return only the updated task as a valid JSON object.`
 
 			// Use streaming API call
 			const stream = await client.messages.create({
-				model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
-				max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
-				temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
+				model: getMainModelId(session),
+				max_tokens: getMainMaxTokens(session),
+				temperature: getMainTemperature(session),
 				system: systemPrompt,
 				messages: [
 					{
@@ -371,12 +371,13 @@ Return only the updated task as a valid JSON object.`
 				}
 				if (reportProgress) {
 					await reportProgress({
-						progress: (responseText.length / CONFIG.maxTokens) * 100
+						progress:
+							(responseText.length / getMainMaxTokens(session)) * 100
 					});
 				}
 				if (mcpLog) {
 					mcpLog.info(
-						`Progress: ${(responseText.length / CONFIG.maxTokens) * 100}%`
+						`Progress: ${(responseText.length / getMainMaxTokens(session)) * 100}%`
 					);
 				}
 			}
@@ -667,7 +668,7 @@ Return only the updated task as a valid JSON object.`
 			console.log(' 2. Use a valid task ID with the --id parameter');
 		}
 
-		if (getDebugFlag()) {
+		if (getDebugFlag(session)) {
 			// Use getter
 			console.error(error);
 		}
@@ -11,7 +11,15 @@ import {
 	stopLoadingIndicator
 } from '../ui.js';
 
-import { getDebugFlag } from '../config-manager.js';
+import {
+	getDebugFlag,
+	getResearchModelId,
+	getResearchTemperature,
+	getResearchMaxTokens,
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature
+} from '../config-manager.js';
 import generateTaskFiles from './generate-task-files.js';
 
 /**
@@ -204,13 +212,9 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
 		}
 
 		if (modelType === 'perplexity') {
-			// Call Perplexity AI using proper format
-			const perplexityModel =
-				process.env.PERPLEXITY_MODEL ||
-				session?.env?.PERPLEXITY_MODEL ||
-				'sonar-pro';
+			// Call Perplexity AI using proper format and getters
 			const result = await client.chat.completions.create({
-				model: perplexityModel,
+				model: getResearchModelId(session),
 				messages: [
 					{
 						role: 'system',
@@ -218,23 +222,11 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
 					},
 					{
 						role: 'user',
-						content: `Here are the tasks to update:
-${taskData}
-
-Please update these tasks based on the following new context:
-${prompt}
-
-IMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.
-
-Return only the updated tasks as a valid JSON array.`
+						content: `Here are the tasks to update:\n${taskData}\n\nPlease update these tasks based on the following new context:\n${prompt}\n\nIMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.\n\nReturn only the updated tasks as a valid JSON array.`
 					}
 				],
-				temperature: parseFloat(
-					process.env.TEMPERATURE ||
-						session?.env?.TEMPERATURE ||
-						CONFIG.temperature
-				),
-				max_tokens: 8700
+				temperature: getResearchTemperature(session),
+				max_tokens: getResearchMaxTokens(session)
 			});
 
 			const responseText = result.choices[0].message.content;
@@ -270,11 +262,11 @@ Return only the updated tasks as a valid JSON array.`
 				}, 500);
 			}
 
-			// Use streaming API call
+			// Use streaming API call with getters
 			const stream = await client.messages.create({
-				model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
-				max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
-				temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
+				model: getMainModelId(session),
+				max_tokens: getMainMaxTokens(session),
+				temperature: getMainTemperature(session),
 				system: systemPrompt,
 				messages: [
 					{
@@ -300,12 +292,13 @@ Return only the updated task as a valid JSON object.`
 				}
 				if (reportProgress) {
 					await reportProgress({
-						progress: (responseText.length / CONFIG.maxTokens) * 100
+						progress:
+							(responseText.length / getMainMaxTokens(session)) * 100
 					});
 				}
 				if (mcpLog) {
 					mcpLog.info(
-						`Progress: ${(responseText.length / CONFIG.maxTokens) * 100}%`
+						`Progress: ${(responseText.length / getMainMaxTokens(session)) * 100}%`
 					);
 				}
 			}