refactor: Standardize configuration and environment variable access
This commit centralizes configuration and environment variable access across modules by consistently using getters from scripts/modules/config-manager.js instead of reading process.env and the global CONFIG object directly. This improves consistency, maintainability, and testability, and gives better handling of session-specific configuration within the MCP context.
Key changes include:
- Centralized Getters: Replaced numerous instances of process.env.* and CONFIG.* with corresponding getter functions (e.g., getLogLevel, getMainModelId, getResearchMaxTokens, getMainTemperature, isApiKeySet, getDebugFlag, getDefaultSubtasks).
- Session Awareness: Ensured that the session object is passed to config getters where necessary, particularly within AI service calls (ai-services.js, add-task.js) and error handling (ai-services.js), allowing session-specific environment overrides (see the sketch after this list).
- API Key Checks: Standardized API key availability checks using isApiKeySet() instead of directly checking process.env.* (e.g., for Perplexity in commands.js and ai-services.js).
- Client Instantiation Cleanup: Removed now-redundant/obsolete local client instantiation functions (getAnthropicClient, getPerplexityClient) from ai-services.js and the global Anthropic client initialization from dependency-manager.js. Client creation should now rely on the config manager and factory patterns.
- Consistent Debug Flag Usage: Standardized calls to getDebugFlag() in commands.js, removing potentially unnecessary null arguments.
- Accurate Progress Calculation: Updated AI stream progress reporting (ai-services.js, add-task.js) to use getMainMaxTokens(session) for more accurate calculations.
- Minor Cleanup: Removed unused import from scripts/modules/commands.js.
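To make the override behavior concrete, here is a minimal sketch of the session-aware getter pattern these changes rely on. It is illustrative only: resolveEnvVariable does appear in the diff below, but the exact precedence (session env, then process env, then configured default) and the config object shape are assumptions, not a verbatim copy of config-manager.js:

    // Sketch only: the precedence order and the `config` shape are assumed.
    function resolveEnvVariable(name, session = null) {
        // An MCP session can carry per-request environment overrides.
        return session?.env?.[name] ?? process.env[name] ?? null;
    }

    function getResearchModelId(session = null) {
        // Fall back to the configured research model when no override exists.
        return resolveEnvVariable('PERPLEXITY_MODEL', session) ?? config.research.modelId;
    }

    function isApiKeySet(provider, session = null) {
        // e.g. 'perplexity' -> PERPLEXITY_API_KEY
        const key = resolveEnvVariable(`${provider.toUpperCase()}_API_KEY`, session);
        return typeof key === 'string' && key.trim().length > 0;
    }

Call sites never read process.env directly; they pass whatever session they have and receive one resolved value back.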
Specific module updates:
- mcp-server/src/logger.js:
- Uses getLogLevel() instead of process.env.LOG_LEVEL.
- scripts/modules/ai-services.js:
- Replaced direct env/config access for model IDs, tokens, temperature, API keys, and default subtasks with appropriate getters.
- Passed session to handleClaudeError.
- Removed local getPerplexityClient and getAnthropicClient functions.
- Updated progress calculations to use getMainMaxTokens(session).
- scripts/modules/commands.js:
- Uses isApiKeySet('perplexity') for API key checks.
- Uses getDebugFlag() consistently for debug checks.
- Removed unused import.
- scripts/modules/dependency-manager.js:
- Removed global Anthropic client initialization.
- scripts/modules/task-manager/add-task.js:
- Uses config getters (getResearch..., getMain...) for Perplexity and Claude API call parameters, preserving customEnv override logic.
This refactoring also resolves a potential SyntaxError: Identifier 'getPerplexityClient' has already been declared by removing the duplicated/obsolete function definition previously present in ai-services.js.
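For reference, the call-site shape before and after, condensed from the Perplexity call sites in the diff below (messages and surrounding code elided):

    // Before: ad-hoc env/CONFIG fallbacks repeated at every call site
    const perplexityModel =
        process.env.PERPLEXITY_MODEL ||
        session?.env?.PERPLEXITY_MODEL ||
        'sonar-pro';
    const result = await client.chat.completions.create({
        model: perplexityModel,
        temperature: parseFloat(
            process.env.TEMPERATURE || session?.env?.TEMPERATURE || CONFIG.temperature
        ),
        max_tokens: 8700
    });

    // After: one getter per setting, with the session passed through
    const result = await client.chat.completions.create({
        model: getResearchModelId(session),
        temperature: getResearchTemperature(session),
        max_tokens: getResearchMaxTokens(session)
    });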
@@ -1,5 +1,6 @@
 import chalk from 'chalk';
 import { isSilentMode } from '../../scripts/modules/utils.js';
+import { getLogLevel } from '../../scripts/modules/config-manager.js';
 
 // Define log levels
 const LOG_LEVELS = {
@@ -10,10 +11,8 @@ const LOG_LEVELS = {
 	success: 4
 };
 
-// Get log level from environment or default to info
-const LOG_LEVEL = process.env.LOG_LEVEL
-	? (LOG_LEVELS[process.env.LOG_LEVEL.toLowerCase()] ?? LOG_LEVELS.info)
-	: LOG_LEVELS.info;
+// Get log level from config manager or default to info
+const LOG_LEVEL = LOG_LEVELS[getLogLevel().toLowerCase()] ?? LOG_LEVELS.info;
 
 /**
  * Logs a message with the specified level
@@ -23,34 +23,14 @@ import {
 	getDebugFlag,
 	getResearchModelId,
 	getResearchMaxTokens,
-	getResearchTemperature
+	getResearchTemperature,
+	getDefaultSubtasks,
+	isApiKeySet
 } from './config-manager.js';
 
 // Load environment variables
 dotenv.config();
 
-/**
- * Get or initialize the Perplexity client
- * @param {object|null} [session=null] - Optional MCP session object.
- * @returns {OpenAI} Perplexity client
- */
-function getPerplexityClient(session = null) {
-	// Use resolveEnvVariable to get the key
-	const apiKey = resolveEnvVariable('PERPLEXITY_API_KEY', session);
-	if (!apiKey) {
-		throw new Error(
-			'PERPLEXITY_API_KEY environment variable is missing. Set it to use research-backed features.'
-		);
-	}
-	// Create and return a new client instance each time for now
-	// Caching can be handled by ai-client-factory later
-	return new OpenAI({
-		apiKey: apiKey,
-		baseURL: 'https://api.perplexity.ai'
-	});
-	// Removed the old caching logic using the global 'perplexity' variable
-}
-
 /**
  * Get the best available AI model for a given operation
  * @param {Object} options - Options for model selection
@@ -134,15 +114,16 @@ function getAvailableAIModel(options = {}, session = null) {
 /**
  * Handle Claude API errors with user-friendly messages
  * @param {Error} error - The error from Claude API
+ * @param {object|null} [session=null] - The MCP session object (optional)
  * @returns {string} User-friendly error message
  */
-function handleClaudeError(error) {
+function handleClaudeError(error, session = null) {
 	// Check if it's a structured error response
 	if (error.type === 'error' && error.error) {
 		switch (error.error.type) {
 			case 'overloaded_error':
-				// Check if we can use Perplexity as a fallback
-				if (process.env.PERPLEXITY_API_KEY) {
+				// Check if we can use Perplexity as a fallback using isApiKeySet
+				if (isApiKeySet('perplexity', session)) {
 					return 'Claude is currently overloaded. Trying to fall back to Perplexity AI.';
 				}
 				return 'Claude is currently experiencing high demand and is overloaded. Please wait a few minutes and try again.';
@@ -258,8 +239,8 @@ Important: Your response must be valid JSON only, with no additional explanation
 			modelConfig
 		);
 	} catch (error) {
-		// Get user-friendly error message
-		const userMessage = handleClaudeError(error);
+		// Get user-friendly error message, passing session
+		const userMessage = handleClaudeError(error, session);
 		log('error', userMessage);
 
 		// Retry logic for certain errors
@@ -431,7 +412,7 @@ async function handleStreamingRequest(
 		if (error.error?.type === 'overloaded_error') {
 			claudeOverloaded = true;
 		}
-		const userMessage = handleClaudeError(error);
+		const userMessage = handleClaudeError(error, session);
 		report(userMessage, 'error');
 
 		throw error;
@@ -728,10 +709,8 @@ async function generateSubtasksWithPerplexity(
 	logFn('info', `Researching context for task ${task.id}: ${task.title}`);
 	const perplexityClient = getPerplexityClient(session);
 
-	const PERPLEXITY_MODEL =
-		process.env.PERPLEXITY_MODEL ||
-		session?.env?.PERPLEXITY_MODEL ||
-		'sonar-pro';
+	// Use getter for model ID
+	const PERPLEXITY_MODEL = getResearchModelId(session);
 
 	// Only create loading indicators if not in silent mode
 	let researchLoadingIndicator = null;
@@ -763,7 +742,7 @@ Include concrete code examples and technical considerations where relevant.`;
 			}
 		],
 		temperature: 0.1, // Lower temperature for more factual responses
-		max_tokens: 8700, // Respect maximum input tokens for Perplexity (8719 max)
+		max_tokens: getResearchMaxTokens(session), // Respect maximum input tokens for Perplexity (8719 max)
 		web_search_options: {
 			search_context_size: 'high'
 		},
@@ -867,7 +846,7 @@ Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use
 		getAnthropicClient(session),
 		{
 			model: getMainModelId(session),
-			max_tokens: 8700,
+			max_tokens: getMainMaxTokens(session),
 			temperature: getMainTemperature(session),
 			system: systemPrompt,
 			messages: [{ role: 'user', content: userPrompt }]
@@ -1035,7 +1014,7 @@ Analyze each task and return a JSON array with the following structure for each
 	"taskId": number,
 	"taskTitle": string,
 	"complexityScore": number (1-10),
-	"recommendedSubtasks": number (${Math.max(3, CONFIG.defaultSubtasks - 1)}-${Math.min(8, CONFIG.defaultSubtasks + 2)}),
+	"recommendedSubtasks": number (${Math.max(3, getDefaultSubtasks() - 1)}-${Math.min(8, getDefaultSubtasks() + 2)}),
 	"expansionPrompt": string (a specific prompt for generating good subtasks),
 	"reasoning": string (brief explanation of your assessment)
 },
@@ -1144,7 +1123,8 @@ async function _handleAnthropicStream(
 		}
 
 		// Report progress - use only mcpLog in MCP context and avoid direct reportProgress calls
-		const maxTokens = params.max_tokens || CONFIG.maxTokens;
+		// Use getter for maxTokens
+		const maxTokens = params.max_tokens || getMainMaxTokens(session);
 		const progressPercent = Math.min(
 			100,
 			(responseText.length / maxTokens) * 100
@@ -1311,35 +1291,6 @@ function _buildAddTaskPrompt(prompt, contextTasks, { newTaskId } = {}) {
 	return { systemPrompt, userPrompt };
 }
 
-/**
- * Get an Anthropic client instance
- * @param {Object} [session] - Optional session object from MCP
- * @returns {Anthropic} Anthropic client instance
- */
-function getAnthropicClient(session) {
-	// If we already have a global client and no session, use the global
-	// if (!session && anthropic) {
-	// 	return anthropic;
-	// }
-
-	// Initialize a new client with API key from session or environment
-	const apiKey = resolveEnvVariable('ANTHROPIC_API_KEY', session);
-
-	if (!apiKey) {
-		throw new Error(
-			'ANTHROPIC_API_KEY environment variable is missing. Set it to use AI features.'
-		);
-	}
-
-	return new Anthropic({
-		apiKey: apiKey,
-		// Add beta header for 128k token output
-		defaultHeaders: {
-			'anthropic-beta': 'output-128k-2025-02-19'
-		}
-	});
-}
-
 /**
  * Generate a detailed task description using Perplexity AI for research
  * @param {string} prompt - Task description prompt
@@ -1358,10 +1309,8 @@ async function generateTaskDescriptionWithPerplexity(
 	log('info', `Researching context for task prompt: "${prompt}"`);
 	const perplexityClient = getPerplexityClient(session);
 
-	const PERPLEXITY_MODEL =
-		process.env.PERPLEXITY_MODEL ||
-		session?.env?.PERPLEXITY_MODEL ||
-		'sonar-pro';
+	// Use getter for model ID
+	const PERPLEXITY_MODEL = getResearchModelId(session);
 	const researchLoadingIndicator = startLoadingIndicator(
 		'Researching best practices with Perplexity AI...'
 	);
@@ -1381,7 +1330,7 @@ Include concrete code examples and technical considerations where relevant.`;
 			}
 		],
 		temperature: 0.1, // Lower temperature for more factual responses
-		max_tokens: 8700, // Respect maximum input tokens for Perplexity (8719 max)
+		max_tokens: getResearchMaxTokens(session), // Respect maximum input tokens for Perplexity (8719 max)
 		web_search_options: {
 			search_context_size: 'high'
 		},
@@ -1464,12 +1413,12 @@ Return a JSON object with the following structure:
 			}
 			if (reportProgress) {
 				await reportProgress({
-					progress: (responseText.length / CONFIG.maxTokens) * 100
+					progress: (responseText.length / getMainMaxTokens(session)) * 100
 				});
 			}
 			if (mcpLog) {
 				mcpLog.info(
-					`Progress: ${(responseText.length / CONFIG.maxTokens) * 100}%`
+					`Progress: ${(responseText.length / getMainMaxTokens(session)) * 100}%`
 				);
 			}
 		}
@@ -1587,8 +1536,8 @@ function parseTasksFromCompletion(completionText) {
 
 // Export AI service functions
 export {
-	getAnthropicClient,
-	getPerplexityClient,
+	// getAnthropicClient, // Removed - This name is not defined here.
+	// getPerplexityClient, // Removed - Not defined or imported here.
 	callClaude,
 	handleStreamingRequest,
 	processClaudeResponse,
@@ -1598,11 +1547,11 @@ export {
 	parseSubtasksFromText,
 	generateComplexityAnalysisPrompt,
 	handleClaudeError,
-	getAvailableAIModel,
+	getAvailableAIModel, // Local function definition
 	parseTaskJsonResponse,
 	_buildAddTaskPrompt,
 	_handleAnthropicStream,
-	getConfiguredAnthropicClient,
+	getConfiguredAnthropicClient, // Locally defined function
 	sendChatWithContext,
 	parseTasksFromCompletion
 };
@@ -13,7 +13,7 @@ import inquirer from 'inquirer';
 import ora from 'ora';
 import Table from 'cli-table3';
 
-import { log, readJSON, writeJSON } from './utils.js';
+import { log, readJSON } from './utils.js';
 import {
 	parsePRD,
 	updateTasks,
@@ -347,7 +347,7 @@ function registerCommands(programInstance) {
 
 				if (useResearch) {
 					// Verify Perplexity API key exists if using research
-					if (!process.env.PERPLEXITY_API_KEY) {
+					if (!isApiKeySet('perplexity')) {
 						console.log(
 							chalk.yellow(
 								'Warning: PERPLEXITY_API_KEY environment variable is missing. Research-backed updates will not be available.'
@@ -400,7 +400,7 @@ function registerCommands(programInstance) {
 					}
 
 					// Use getDebugFlag getter instead of CONFIG.debug
-					if (getDebugFlag(null)) {
+					if (getDebugFlag()) {
 						console.error(error);
 					}
 
@@ -500,7 +500,7 @@ function registerCommands(programInstance) {
 
 				if (useResearch) {
 					// Verify Perplexity API key exists if using research
-					if (!process.env.PERPLEXITY_API_KEY) {
+					if (!isApiKeySet('perplexity')) {
 						console.log(
 							chalk.yellow(
 								'Warning: PERPLEXITY_API_KEY environment variable is missing. Research-backed updates will not be available.'
@@ -556,7 +556,7 @@ function registerCommands(programInstance) {
 					}
 
 					// Use getDebugFlag getter instead of CONFIG.debug
-					if (getDebugFlag(null)) {
+					if (getDebugFlag()) {
 						console.error(error);
 					}
 
@@ -923,7 +923,7 @@ function registerCommands(programInstance) {
 				console.log(chalk.gray('Next: Complete this task or add more tasks'));
 			} catch (error) {
 				console.error(chalk.red(`Error adding task: ${error.message}`));
-				if (error.stack && getDebugFlag(null)) {
+				if (error.stack && getDebugFlag()) {
 					console.error(error.stack);
 				}
 				process.exit(1);
@@ -2105,7 +2105,7 @@ function registerCommands(programInstance) {
 				}
 			} catch (error) {
 				log(`Error processing models command: ${error.message}`, 'error');
-				if (error.stack && getDebugFlag(null)) {
+				if (error.stack && getDebugFlag()) {
 					log(error.stack, 'debug');
 				}
 				process.exit(1);
@@ -2337,7 +2337,7 @@ async function runCLI(argv = process.argv) {
 	} catch (error) {
 		console.error(chalk.red(`Error: ${error.message}`));
 
-		if (getDebugFlag(null)) {
+		if (getDebugFlag()) {
 			console.error(error);
 		}
 
@@ -6,7 +6,8 @@
 import path from 'path';
 import chalk from 'chalk';
 import boxen from 'boxen';
-import { Anthropic } from '@anthropic-ai/sdk';
+// Remove Anthropic import if client is no longer initialized globally
+// import { Anthropic } from '@anthropic-ai/sdk';
 
 import {
 	log,
@@ -22,10 +23,10 @@ import { displayBanner } from './ui.js';
 
 import { generateTaskFiles } from './task-manager.js';
 
-// Initialize Anthropic client
-const anthropic = new Anthropic({
-	apiKey: process.env.ANTHROPIC_API_KEY
-});
+// Remove global Anthropic client initialization
+// const anthropic = new Anthropic({
+// 	apiKey: process.env.ANTHROPIC_API_KEY
+// });
 
 /**
  * Add a dependency to a task
@@ -11,7 +11,15 @@ import {
 } from '../ui.js';
 import { log, readJSON, writeJSON, truncate } from '../utils.js';
 import { _handleAnthropicStream } from '../ai-services.js';
-import { getDefaultPriority } from '../config-manager.js';
+import {
+	getDefaultPriority,
+	getResearchModelId,
+	getResearchTemperature,
+	getResearchMaxTokens,
+	getMainModelId,
+	getMainTemperature,
+	getMainMaxTokens
+} from '../config-manager.js';
 
 /**
  * Add a new task using AI
@@ -183,46 +191,26 @@ async function addTask(
 
 		if (modelType === 'perplexity') {
 			// Use Perplexity AI
-			const perplexityModel =
-				process.env.PERPLEXITY_MODEL ||
-				session?.env?.PERPLEXITY_MODEL ||
-				'sonar-pro';
 			const response = await client.chat.completions.create({
-				model: perplexityModel,
+				model: getResearchModelId(session),
 				messages: [
 					{ role: 'system', content: systemPrompt },
 					{ role: 'user', content: userPrompt }
 				],
-				temperature: parseFloat(
-					process.env.TEMPERATURE ||
-						session?.env?.TEMPERATURE ||
-						CONFIG.temperature
-				),
-				max_tokens: parseInt(
-					process.env.MAX_TOKENS ||
-						session?.env?.MAX_TOKENS ||
-						CONFIG.maxTokens
-				)
+				temperature: getResearchTemperature(session),
+				max_tokens: getResearchMaxTokens(session)
 			});
 
 			const responseText = response.choices[0].message.content;
 			aiGeneratedTaskData = parseTaskJsonResponse(responseText);
 		} else {
 			// Use Claude (default)
-			// Prepare API parameters
+			// Prepare API parameters using getters, preserving customEnv override
 			const apiParams = {
-				model:
-					session?.env?.ANTHROPIC_MODEL ||
-					CONFIG.model ||
-					customEnv?.ANTHROPIC_MODEL,
-				max_tokens:
-					session?.env?.MAX_TOKENS ||
-					CONFIG.maxTokens ||
-					customEnv?.MAX_TOKENS,
+				model: customEnv?.ANTHROPIC_MODEL || getMainModelId(session),
+				max_tokens: customEnv?.MAX_TOKENS || getMainMaxTokens(session),
 				temperature:
-					session?.env?.TEMPERATURE ||
-					CONFIG.temperature ||
-					customEnv?.TEMPERATURE,
+					customEnv?.TEMPERATURE || getMainTemperature(session),
 				system: systemPrompt,
 				messages: [{ role: 'user', content: userPrompt }]
 			};
@@ -8,7 +8,17 @@ import { startLoadingIndicator, stopLoadingIndicator } from '../ui.js';
 
 import { generateComplexityAnalysisPrompt } from '../ai-services.js';
 
-import { getDebugFlag } from '../config-manager.js';
+import {
+	getDebugFlag,
+	getProjectName,
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature,
+	getResearchModelId,
+	getResearchMaxTokens,
+	getResearchTemperature,
+	getDefaultSubtasks
+} from '../config-manager.js';
 
 /**
  * Analyzes task complexity and generates expansion recommendations
@@ -127,6 +137,83 @@ async function analyzeTaskComplexity(
 		}
 	}
 
+	// If after filtering, there are no tasks left to analyze, exit early.
+	if (tasksData.tasks.length === 0) {
+		const emptyReport = {
+			meta: {
+				generatedAt: new Date().toISOString(),
+				tasksAnalyzed: tasksData.tasks.length,
+				thresholdScore: thresholdScore,
+				projectName: getProjectName(session),
+				usedResearch: useResearch
+			},
+			complexityAnalysis: []
+		};
+		// Write the report to file
+		reportLog(`Writing complexity report to ${outputPath}...`, 'info');
+		writeJSON(outputPath, emptyReport);
+
+		reportLog(
+			`Task complexity analysis complete. Report written to ${outputPath}`,
+			'success'
+		);
+
+		// Only show UI elements for text output (CLI)
+		if (outputFormat === 'text') {
+			console.log(
+				chalk.green(
+					`Task complexity analysis complete. Report written to ${outputPath}`
+				)
+			);
+
+			// Display a summary of findings
+			const highComplexity = emptyReport.complexityAnalysis.filter(
+				(t) => t.complexityScore >= 8
+			).length;
+			const mediumComplexity = emptyReport.complexityAnalysis.filter(
+				(t) => t.complexityScore >= 5 && t.complexityScore < 8
+			).length;
+			const lowComplexity = emptyReport.complexityAnalysis.filter(
+				(t) => t.complexityScore < 5
+			).length;
+			const totalAnalyzed = emptyReport.complexityAnalysis.length;
+
+			console.log('\nComplexity Analysis Summary:');
+			console.log('----------------------------');
+			console.log(`Tasks in input file: ${tasksData.tasks.length}`);
+			console.log(`Tasks successfully analyzed: ${totalAnalyzed}`);
+			console.log(`High complexity tasks: ${highComplexity}`);
+			console.log(`Medium complexity tasks: ${mediumComplexity}`);
+			console.log(`Low complexity tasks: ${lowComplexity}`);
+			console.log(
+				`Sum verification: ${highComplexity + mediumComplexity + lowComplexity} (should equal ${totalAnalyzed})`
+			);
+			console.log(`Research-backed analysis: ${useResearch ? 'Yes' : 'No'}`);
+			console.log(
+				`\nSee ${outputPath} for the full report and expansion commands.`
+			);
+
+			// Show next steps suggestions
+			console.log(
+				boxen(
+					chalk.white.bold('Suggested Next Steps:') +
+						'\n\n' +
+						`${chalk.cyan('1.')} Run ${chalk.yellow('task-master complexity-report')} to review detailed findings\n` +
+						`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down complex tasks\n` +
+						`${chalk.cyan('3.')} Run ${chalk.yellow('task-master expand --all')} to expand all pending tasks based on complexity`,
+					{
+						padding: 1,
+						borderColor: 'cyan',
+						borderStyle: 'round',
+						margin: { top: 1 }
+					}
+				)
+			);
+		}
+
+		return emptyReport;
+	}
+
 	// Prepare the prompt for the LLM
 	const prompt = generateComplexityAnalysisPrompt(tasksData);
 
@@ -183,11 +270,9 @@ Your response must be a clean JSON array only, following exactly this format:
 
 DO NOT include any text before or after the JSON array. No explanations, no markdown formatting.`;
 
+			// Keep the direct AI call for now, use config getters for parameters
 			const result = await perplexity.chat.completions.create({
-				model:
-					process.env.PERPLEXITY_MODEL ||
-					session?.env?.PERPLEXITY_MODEL ||
-					'sonar-pro',
+				model: getResearchModelId(session),
 				messages: [
 					{
 						role: 'system',
@@ -199,8 +284,8 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 						content: researchPrompt
 					}
 				],
-				temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
-				max_tokens: 8700,
+				temperature: getResearchTemperature(session),
+				max_tokens: getResearchMaxTokens(session),
 				web_search_options: {
 					search_context_size: 'high'
 				},
@@ -236,6 +321,12 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 				console.log(chalk.gray('Response first 200 chars:'));
 				console.log(chalk.gray(fullResponse.substring(0, 200)));
 			}
+
+			if (getDebugFlag(session)) {
+				console.debug(
+					chalk.gray(`Raw response: ${fullResponse.substring(0, 500)}...`)
+				);
+			}
 		} catch (perplexityError) {
 			reportLog(
 				`Falling back to Claude for complexity analysis: ${perplexityError.message}`,
@@ -287,12 +378,11 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 				);
 			}
 
-			// Call the LLM API with streaming
+			// Keep the direct AI call for now, use config getters for parameters
 			const stream = await anthropic.messages.create({
-				max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
-				model:
-					modelOverride || CONFIG.model || session?.env?.ANTHROPIC_MODEL,
-				temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
+				max_tokens: getMainMaxTokens(session),
+				model: modelOverride || getMainModelId(session),
+				temperature: getMainTemperature(session),
 				messages: [{ role: 'user', content: prompt }],
 				system:
 					'You are an expert software architect and project manager analyzing task complexity. Respond only with valid JSON.',
@@ -318,12 +408,13 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 				}
 				if (reportProgress) {
 					await reportProgress({
-						progress: (fullResponse.length / CONFIG.maxTokens) * 100
+						progress:
+							(fullResponse.length / getMainMaxTokens(session)) * 100
 					});
 				}
 				if (mcpLog) {
 					mcpLog.info(
-						`Progress: ${(fullResponse.length / CONFIG.maxTokens) * 100}%`
+						`Progress: ${(fullResponse.length / getMainMaxTokens(session)) * 100}%`
 					);
 				}
 			}
@@ -797,7 +888,7 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 				generatedAt: new Date().toISOString(),
 				tasksAnalyzed: tasksData.tasks.length,
 				thresholdScore: thresholdScore,
-				projectName: tasksData.meta?.projectName || 'Your Project Name',
+				projectName: getProjectName(session),
 				usedResearch: useResearch
 			},
 			complexityAnalysis: complexityAnalysis
@@ -865,6 +956,12 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 					}
 				)
 			);
+
+			if (getDebugFlag(session)) {
+				console.debug(
+					chalk.gray(`Raw response: ${fullResponse.substring(0, 500)}...`)
+				);
+			}
 		}
 
 		return finalReport;
@@ -885,8 +982,7 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 		console.error(
 			chalk.red(`Error parsing complexity analysis: ${error.message}`)
 		);
-		if (getDebugFlag()) {
-			// Use getter
+		if (getDebugFlag(session)) {
 			console.debug(
 				chalk.gray(`Raw response: ${fullResponse.substring(0, 500)}...`)
 			);
@@ -931,8 +1027,7 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
 		);
 	}
 
-	if (getDebugFlag()) {
-		// Use getter
+	if (getDebugFlag(session)) {
 		console.error(error);
 	}
 
@@ -12,7 +12,12 @@
 	parseSubtasksFromText
 } from '../ai-services.js';
 
-import { getDefaultSubtasks } from '../config-manager.js';
+import {
+	getDefaultSubtasks,
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature
+} from '../config-manager.js';
 import generateTaskFiles from './generate-task-files.js';
 
 /**
@@ -207,11 +212,11 @@ Return exactly ${subtaskCount} subtasks with the following JSON structure:
 
 Note on dependencies: Subtasks can depend on other subtasks with lower IDs. Use an empty array if there are no dependencies.`;
 
-	// Prepare API parameters
+	// Prepare API parameters using getters
 	const apiParams = {
-		model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
-		max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
-		temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
+		model: getMainModelId(session),
+		max_tokens: getMainMaxTokens(session),
+		temperature: getMainTemperature(session),
 		system: systemPrompt,
 		messages: [{ role: 'user', content: userPrompt }]
 	};
@@ -6,6 +6,16 @@ import {
 	parseSubtasksFromText
 } from '../ai-services.js';
 
+// Import necessary config getters
+import {
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature,
+	getResearchModelId,
+	getResearchMaxTokens,
+	getResearchTemperature
+} from '../config-manager.js';
+
 /**
  * Call AI to generate subtasks based on a prompt
  * @param {string} prompt - The prompt to send to the AI
@@ -26,9 +36,9 @@ async function getSubtasksFromAI(
 
 	// Prepare API parameters
 	const apiParams = {
-		model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
-		max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
-		temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
+		model: getMainModelId(session),
+		max_tokens: getMainMaxTokens(session),
+		temperature: getMainTemperature(session),
 		system:
 			'You are an AI assistant helping with task breakdown for software development.',
 		messages: [{ role: 'user', content: prompt }]
@@ -46,10 +56,7 @@ async function getSubtasksFromAI(
 			mcpLog.info('Using Perplexity AI for research-backed subtasks');
 		}
 
-		const perplexityModel =
-			process.env.PERPLEXITY_MODEL ||
-			session?.env?.PERPLEXITY_MODEL ||
-			'sonar-pro';
+		const perplexityModel = getResearchModelId(session);
 		const result = await perplexity.chat.completions.create({
 			model: perplexityModel,
 			messages: [
@@ -60,8 +67,8 @@ async function getSubtasksFromAI(
 				},
 				{ role: 'user', content: prompt }
 			],
-			temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
-			max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens
+			temperature: getResearchTemperature(session),
+			max_tokens: getResearchMaxTokens(session)
 		});
 
 		responseText = result.choices[0].message.content;
@@ -97,7 +97,8 @@ async function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {
 		if (!options?.mcpLog) {
 			console.error(chalk.red(`Error: ${error.message}`));
 
-			if (getDebugFlag()) {
+			// Pass session to getDebugFlag
+			if (getDebugFlag(options?.session)) {
 				// Use getter
 				console.error(error);
 			}
@@ -11,7 +11,15 @@ import {
 } from '../ui.js';
 import { log, readJSON, writeJSON, truncate, isSilentMode } from '../utils.js';
 import { getAvailableAIModel } from '../ai-services.js';
-import { getDebugFlag } from '../config-manager.js';
+import {
+	getDebugFlag,
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature,
+	getResearchModelId,
+	getResearchMaxTokens,
+	getResearchTemperature
+} from '../config-manager.js';
 import generateTaskFiles from './generate-task-files.js';
 
 /**
@@ -231,26 +239,15 @@ Provide concrete examples, code snippets, or implementation details when relevan
 
 		if (modelType === 'perplexity') {
 			// Construct Perplexity payload
-			const perplexityModel =
-				process.env.PERPLEXITY_MODEL ||
-				session?.env?.PERPLEXITY_MODEL ||
-				'sonar-pro';
+			const perplexityModel = getResearchModelId(session);
 			const response = await client.chat.completions.create({
 				model: perplexityModel,
 				messages: [
 					{ role: 'system', content: systemPrompt },
 					{ role: 'user', content: userMessageContent }
 				],
-				temperature: parseFloat(
-					process.env.TEMPERATURE ||
-						session?.env?.TEMPERATURE ||
-						CONFIG.temperature
-				),
-				max_tokens: parseInt(
-					process.env.MAX_TOKENS ||
-						session?.env?.MAX_TOKENS ||
-						CONFIG.maxTokens
-				)
+				temperature: getResearchTemperature(session),
+				max_tokens: getResearchMaxTokens(session)
 			});
 			additionalInformation = response.choices[0].message.content.trim();
 		} else {
@@ -272,11 +269,11 @@ Provide concrete examples, code snippets, or implementation details when relevan
 				}, 500);
 			}
 
-			// Construct Claude payload
+			// Construct Claude payload using config getters
 			const stream = await client.messages.create({
-				model: CONFIG.model,
-				max_tokens: CONFIG.maxTokens,
-				temperature: CONFIG.temperature,
+				model: getMainModelId(session),
+				max_tokens: getMainMaxTokens(session),
+				temperature: getMainTemperature(session),
 				system: systemPrompt,
 				messages: [{ role: 'user', content: userMessageContent }],
 				stream: true
@@ -288,12 +285,13 @@ Provide concrete examples, code snippets, or implementation details when relevan
 				}
 				if (reportProgress) {
 					await reportProgress({
-						progress: (responseText.length / CONFIG.maxTokens) * 100
+						progress:
+							(responseText.length / getMainMaxTokens(session)) * 100
 					});
 				}
 				if (mcpLog) {
 					mcpLog.info(
-						`Progress: ${(responseText.length / CONFIG.maxTokens) * 100}%`
+						`Progress: ${(responseText.length / getMainMaxTokens(session)) * 100}%`
 					);
 				}
 			}
@@ -540,7 +538,7 @@ Provide concrete examples, code snippets, or implementation details when relevan
 			' 1. Set your Perplexity API key: export PERPLEXITY_API_KEY=your_api_key_here'
 		);
 		console.log(
-			' 2. Or run without the research flag: task-master update-subtask --id=<id> --prompt=\"...\"'
+			' 2. Or run without the research flag: task-master update-subtask --id=<id> --prompt="..."'
 		);
 	} else if (error.message?.includes('overloaded')) {
 		// Catch final overload error
@@ -568,7 +566,7 @@ Provide concrete examples, code snippets, or implementation details when relevan
 		);
 	}
 
-	if (getDebugFlag()) {
+	if (getDebugFlag(session)) {
 		// Use getter
 		console.error(error);
 	}
@@ -13,7 +13,16 @@ import {
 } from '../ui.js';
 
 import { _handleAnthropicStream } from '../ai-services.js';
-import { getDebugFlag } from '../config-manager.js';
+import {
+	getDebugFlag,
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature,
+	getResearchModelId,
+	getResearchMaxTokens,
+	getResearchTemperature,
+	isApiKeySet
+} from '../config-manager.js';
 import generateTaskFiles from './generate-task-files.js';
 
 /**
@@ -64,15 +73,10 @@ async function updateTaskById(
 		);
 	}
 
-	// Validate research flag
-	if (
-		useResearch &&
-		(!perplexity ||
-			!process.env.PERPLEXITY_API_KEY ||
-			session?.env?.PERPLEXITY_API_KEY)
-	) {
+	// Validate research flag and API key
+	if (useResearch && !isApiKeySet('perplexity', session)) {
 		report(
-			'Perplexity AI is not available. Falling back to Claude AI.',
+			'Perplexity AI research requested but API key is not set. Falling back to main AI.',
 			'warn'
 		);
 
@@ -274,7 +278,7 @@ The changes described in the prompt should be thoughtfully applied to make the t
 				session?.env?.PERPLEXITY_MODEL ||
 				'sonar-pro';
 			const result = await client.chat.completions.create({
-				model: perplexityModel,
+				model: getResearchModelId(session),
 				messages: [
 					{
 						role: 'system',
@@ -293,12 +297,8 @@ IMPORTANT: In the task JSON above, any subtasks with "status": "done" or "status
 Return only the updated task as a valid JSON object.`
 					}
 				],
-				temperature: parseFloat(
-					process.env.TEMPERATURE ||
-						session?.env?.TEMPERATURE ||
-						CONFIG.temperature
-				),
-				max_tokens: 8700
+				temperature: getResearchTemperature(session),
+				max_tokens: getResearchMaxTokens(session)
 			});
 
 			const responseText = result.choices[0].message.content;
@@ -343,9 +343,9 @@ Return only the updated task as a valid JSON object.`
 
 			// Use streaming API call
 			const stream = await client.messages.create({
-				model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
-				max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
-				temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
+				model: getMainModelId(session),
+				max_tokens: getMainMaxTokens(session),
+				temperature: getMainTemperature(session),
 				system: systemPrompt,
 				messages: [
 					{
@@ -371,12 +371,13 @@ Return only the updated task as a valid JSON object.`
 				}
 				if (reportProgress) {
 					await reportProgress({
-						progress: (responseText.length / CONFIG.maxTokens) * 100
+						progress:
+							(responseText.length / getMainMaxTokens(session)) * 100
 					});
 				}
 				if (mcpLog) {
 					mcpLog.info(
-						`Progress: ${(responseText.length / CONFIG.maxTokens) * 100}%`
+						`Progress: ${(responseText.length / getMainMaxTokens(session)) * 100}%`
 					);
 				}
 			}
@@ -667,7 +668,7 @@ Return only the updated task as a valid JSON object.`
 		console.log(' 2. Use a valid task ID with the --id parameter');
 	}
 
-	if (getDebugFlag()) {
+	if (getDebugFlag(session)) {
 		// Use getter
 		console.error(error);
 	}
@@ -11,7 +11,15 @@ import {
 	stopLoadingIndicator
 } from '../ui.js';
 
-import { getDebugFlag } from '../config-manager.js';
+import {
+	getDebugFlag,
+	getResearchModelId,
+	getResearchTemperature,
+	getResearchMaxTokens,
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature
+} from '../config-manager.js';
 import generateTaskFiles from './generate-task-files.js';
 
 /**
@@ -204,13 +212,9 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
 		}
 
 		if (modelType === 'perplexity') {
-			// Call Perplexity AI using proper format
-			const perplexityModel =
-				process.env.PERPLEXITY_MODEL ||
-				session?.env?.PERPLEXITY_MODEL ||
-				'sonar-pro';
+			// Call Perplexity AI using proper format and getters
 			const result = await client.chat.completions.create({
-				model: perplexityModel,
+				model: getResearchModelId(session),
 				messages: [
 					{
 						role: 'system',
@@ -218,23 +222,11 @@ The changes described in the prompt should be applied to ALL tasks in the list.`
 					},
 					{
 						role: 'user',
-						content: `Here are the tasks to update:
-${taskData}
-
-Please update these tasks based on the following new context:
-${prompt}
-
-IMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.
-
-Return only the updated tasks as a valid JSON array.`
+						content: `Here are the tasks to update:\n${taskData}\n\nPlease update these tasks based on the following new context:\n${prompt}\n\nIMPORTANT: In the tasks JSON above, any subtasks with "status": "done" or "status": "completed" should be preserved exactly as is. Build your changes around these completed items.\n\nReturn only the updated tasks as a valid JSON array.`
 					}
 				],
-				temperature: parseFloat(
-					process.env.TEMPERATURE ||
-						session?.env?.TEMPERATURE ||
-						CONFIG.temperature
-				),
-				max_tokens: 8700
+				temperature: getResearchTemperature(session),
+				max_tokens: getResearchMaxTokens(session)
 			});
 
 			const responseText = result.choices[0].message.content;
@@ -270,11 +262,11 @@ Return only the updated tasks as a valid JSON array.`
 				}, 500);
 			}
 
-			// Use streaming API call
+			// Use streaming API call with getters
 			const stream = await client.messages.create({
-				model: session?.env?.ANTHROPIC_MODEL || CONFIG.model,
-				max_tokens: session?.env?.MAX_TOKENS || CONFIG.maxTokens,
-				temperature: session?.env?.TEMPERATURE || CONFIG.temperature,
+				model: getMainModelId(session),
+				max_tokens: getMainMaxTokens(session),
+				temperature: getMainTemperature(session),
 				system: systemPrompt,
 				messages: [
 					{
@@ -300,12 +292,13 @@ Return only the updated task as a valid JSON object.`
 				}
 				if (reportProgress) {
 					await reportProgress({
-						progress: (responseText.length / CONFIG.maxTokens) * 100
+						progress:
+							(responseText.length / getMainMaxTokens(session)) * 100
 					});
 				}
 				if (mcpLog) {
 					mcpLog.info(
-						`Progress: ${(responseText.length / CONFIG.maxTokens) * 100}%`
+						`Progress: ${(responseText.length / getMainMaxTokens(session)) * 100}%`
 					);
 				}
 			}
@@ -19,7 +19,16 @@ import {
 import path from 'path';
 import fs from 'fs';
 import { findNextTask, analyzeTaskComplexity } from './task-manager.js';
-import { getProjectName, getDefaultSubtasks } from './config-manager.js';
+import {
+	getProjectName,
+	getDefaultSubtasks,
+	getMainModelId,
+	getMainMaxTokens,
+	getMainTemperature,
+	getDebugFlag,
+	getLogLevel,
+	getDefaultPriority
+} from './config-manager.js';
 
 // Create a color gradient for the banner
 const coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);
@@ -591,17 +600,17 @@ function displayHelp() {
 		[
 			`${chalk.yellow('MODEL')}${chalk.reset('')}`,
 			`${chalk.white('Claude model to use')}${chalk.reset('')}`,
-			`${chalk.dim(`Default: ${CONFIG.model}`)}${chalk.reset('')}`
+			`${chalk.dim(`Default: ${getMainModelId()}`)}${chalk.reset('')}`
 		],
 		[
 			`${chalk.yellow('MAX_TOKENS')}${chalk.reset('')}`,
 			`${chalk.white('Maximum tokens for responses')}${chalk.reset('')}`,
-			`${chalk.dim(`Default: ${CONFIG.maxTokens}`)}${chalk.reset('')}`
+			`${chalk.dim(`Default: ${getMainMaxTokens()}`)}${chalk.reset('')}`
 		],
 		[
 			`${chalk.yellow('TEMPERATURE')}${chalk.reset('')}`,
 			`${chalk.white('Temperature for model responses')}${chalk.reset('')}`,
-			`${chalk.dim(`Default: ${CONFIG.temperature}`)}${chalk.reset('')}`
+			`${chalk.dim(`Default: ${getMainTemperature()}`)}${chalk.reset('')}`
 		],
 		[
 			`${chalk.yellow('PERPLEXITY_API_KEY')}${chalk.reset('')}`,
@@ -616,27 +625,27 @@ function displayHelp() {
 		[
 			`${chalk.yellow('DEBUG')}${chalk.reset('')}`,
 			`${chalk.white('Enable debug logging')}${chalk.reset('')}`,
-			`${chalk.dim(`Default: ${CONFIG.debug}`)}${chalk.reset('')}`
+			`${chalk.dim(`Default: ${getDebugFlag()}`)}${chalk.reset('')}`
 		],
 		[
 			`${chalk.yellow('LOG_LEVEL')}${chalk.reset('')}`,
 			`${chalk.white('Console output level (debug,info,warn,error)')}${chalk.reset('')}`,
-			`${chalk.dim(`Default: ${CONFIG.logLevel}`)}${chalk.reset('')}`
+			`${chalk.dim(`Default: ${getLogLevel()}`)}${chalk.reset('')}`
 		],
 		[
 			`${chalk.yellow('DEFAULT_SUBTASKS')}${chalk.reset('')}`,
 			`${chalk.white('Default number of subtasks to generate')}${chalk.reset('')}`,
-			`${chalk.dim(`Default: ${CONFIG.defaultSubtasks}`)}${chalk.reset('')}`
+			`${chalk.dim(`Default: ${getDefaultSubtasks()}`)}${chalk.reset('')}`
 		],
 		[
 			`${chalk.yellow('DEFAULT_PRIORITY')}${chalk.reset('')}`,
 			`${chalk.white('Default task priority')}${chalk.reset('')}`,
-			`${chalk.dim(`Default: ${CONFIG.defaultPriority}`)}${chalk.reset('')}`
+			`${chalk.dim(`Default: ${getDefaultPriority()}`)}${chalk.reset('')}`
 		],
 		[
 			`${chalk.yellow('PROJECT_NAME')}${chalk.reset('')}`,
 			`${chalk.white('Project name displayed in UI')}${chalk.reset('')}`,
-			`${chalk.dim(`Default: ${CONFIG.projectName}`)}${chalk.reset('')}`
+			`${chalk.dim(`Default: ${getProjectName()}`)}${chalk.reset('')}`
 		]
 	);
 