refactor(analyze): Align complexity analysis with unified AI service

Refactored the `analyzeTaskComplexity` feature and related components (CLI command, MCP tool, direct function) to integrate with the unified AI service layer.

Initially, the refactor was implemented to leverage structured output generation. However, this approach encountered persistent errors:
- Perplexity provider returned internal server errors.
- Anthropic provider failed with schema type and model errors.

Due to the unreliability of structured output generation for this specific use case, the core AI interaction within `analyzeTaskComplexity` was reverted to use plain-text generation. Basic manual JSON parsing and cleanup logic for the text response were reintroduced.

Key changes include:
- Removed direct AI client initialization (Anthropic, Perplexity).
- Removed direct fetching of AI model configuration parameters.
- Removed manual AI retry/fallback/streaming logic.
- Replaced direct AI calls with a call to the unified AI service.
- Updated the `analyzeTaskComplexityDirect` wrapper to pass session context correctly.
- Updated the MCP tool for correct path resolution and argument passing.
- Updated the CLI command for correct path resolution.
- Preserved core functionality: task loading/filtering, report generation, CLI summary display.

Both the CLI command and the MCP tool have been verified to work correctly with this revised approach.

For reference, log output from an earlier failing run (direct Claude API call with an invalid max_tokens configuration), which motivated this refactor:

[INFO] Initialized Perplexity client with OpenAI compatibility layer
[INFO] Initialized Perplexity client with OpenAI compatibility layer
Analyzing task complexity from: tasks/tasks.json
Output report will be saved to: scripts/task-complexity-report.json
Analyzing task complexity and generating expansion recommendations...
[INFO] Reading tasks from tasks/tasks.json...
[INFO] Found 62 total tasks in the task file.
[INFO] Skipping 31 tasks marked as done/cancelled/deferred. Analyzing 31 active tasks.
Skipping 31 tasks marked as done/cancelled/deferred. Analyzing 31 active tasks.
[INFO] Claude API attempt 1/2
[ERROR] Error in Claude API call: 400 {"type":"error","error":{"type":"invalid_request_error","message":"max_tokens: 100000 > 64000, which is the maximum allowed number of output tokens for claude-3-7-sonnet-20250219"}}
[ERROR] Non-overload Claude API error: 400 {"type":"error","error":{"type":"invalid_request_error","message":"max_tokens: 100000 > 64000, which is the maximum allowed number of output tokens for claude-3-7-sonnet-20250219"}}
Claude API error: 400 {"type":"error","error":{"type":"invalid_request_error","message":"max_tokens: 100000 > 64000, which is the maximum allowed number of output tokens for claude-3-7-sonnet-20250219"}}
[ERROR] Error during AI analysis: 400 {"type":"error","error":{"type":"invalid_request_error","message":"max_tokens: 100000 > 64000, which is the maximum allowed number of output tokens for claude-3-7-sonnet-20250219"}}
[ERROR] Error analyzing task complexity: 400 {"type":"error","error":{"type":"invalid_request_error","message":"max_tokens: 100000 > 64000, which is the maximum allowed number of output tokens for claude-3-7-sonnet-20250219"}}
This commit is contained in:
Eyal Toledano
2025-04-24 22:33:33 -04:00
parent 205a11e82c
commit 70cc15bc87
7 changed files with 973 additions and 1086 deletions

View File

@@ -2,12 +2,11 @@
* Direct function wrapper for analyzeTaskComplexity
*/
import { analyzeTaskComplexity } from '../../../../scripts/modules/task-manager.js';
import analyzeTaskComplexity from '../../../../scripts/modules/task-manager/analyze-task-complexity.js';
import {
enableSilentMode,
disableSilentMode,
isSilentMode,
readJSON
isSilentMode
} from '../../../../scripts/modules/utils.js';
import fs from 'fs';
import path from 'path';
@@ -17,22 +16,23 @@ import path from 'path';
* @param {Object} args - Function arguments
* @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
* @param {string} args.outputPath - Explicit absolute path to save the report.
* @param {string} [args.model] - LLM model to use for analysis
* @param {string} [args.model] - Deprecated: LLM model to use for analysis (ignored)
* @param {string|number} [args.threshold] - Minimum complexity score to recommend expansion (1-10)
* @param {boolean} [args.research] - Use Perplexity AI for research-backed complexity analysis
* @param {Object} log - Logger object
* @param {Object} [context={}] - Context object containing session data
* @param {Object} [context.session] - MCP session object
* @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}
*/
export async function analyzeTaskComplexityDirect(args, log, context = {}) {
const { session } = context; // Only extract session, not reportProgress
const { session } = context; // Extract session
// Destructure expected args
const { tasksJsonPath, outputPath, model, threshold, research } = args;
const { tasksJsonPath, outputPath, model, threshold, research } = args; // Model is ignored by core function now
// --- Initial Checks (remain the same) ---
try {
log.info(`Analyzing task complexity with args: ${JSON.stringify(args)}`);
// Check if required paths were provided
if (!tasksJsonPath) {
log.error('analyzeTaskComplexityDirect called without tasksJsonPath');
return {
@@ -51,7 +51,6 @@ export async function analyzeTaskComplexityDirect(args, log, context = {}) {
};
}
// Use the provided paths
const tasksPath = tasksJsonPath;
const resolvedOutputPath = outputPath;
@@ -59,25 +58,25 @@ export async function analyzeTaskComplexityDirect(args, log, context = {}) {
log.info(`Output report will be saved to: ${resolvedOutputPath}`);
if (research) {
log.info('Using Perplexity AI for research-backed complexity analysis');
log.info('Using research role for complexity analysis');
}
// Create options object for analyzeTaskComplexity using provided paths
// Prepare options for the core function
const options = {
file: tasksPath,
output: resolvedOutputPath,
model: model,
// model: model, // No longer needed
threshold: threshold,
research: research === true
research: research === true // Ensure boolean
};
// --- End Initial Checks ---
// Enable silent mode to prevent console logs from interfering with JSON response
// --- Silent Mode and Logger Wrapper (remain the same) ---
const wasSilent = isSilentMode();
if (!wasSilent) {
enableSilentMode();
}
// Create a logWrapper that matches the expected mcpLog interface as specified in utilities.mdc
const logWrapper = {
info: (message, ...args) => log.info(message, ...args),
warn: (message, ...args) => log.warn(message, ...args),
@@ -85,52 +84,71 @@ export async function analyzeTaskComplexityDirect(args, log, context = {}) {
debug: (message, ...args) => log.debug && log.debug(message, ...args),
success: (message, ...args) => log.info(message, ...args) // Map success to info
};
// --- End Silent Mode and Logger Wrapper ---
let report; // To store the result from the core function
try {
// Call the core function with session and logWrapper as mcpLog
await analyzeTaskComplexity(options, {
session,
mcpLog: logWrapper // Use the wrapper instead of passing log directly
// --- Call Core Function (Updated Context Passing) ---
// Call the core function, passing options and the context object { session, mcpLog }
report = await analyzeTaskComplexity(options, {
session, // Pass the session object
mcpLog: logWrapper // Pass the logger wrapper
});
// --- End Core Function Call ---
} catch (error) {
log.error(`Error in analyzeTaskComplexity: ${error.message}`);
log.error(
`Error in analyzeTaskComplexity core function: ${error.message}`
);
// Restore logging if we changed it
if (!wasSilent && isSilentMode()) {
disableSilentMode();
}
return {
success: false,
error: {
code: 'ANALYZE_ERROR',
message: `Error running complexity analysis: ${error.message}`
code: 'ANALYZE_CORE_ERROR', // More specific error code
message: `Error running core complexity analysis: ${error.message}`
}
};
} finally {
// Always restore normal logging in finally block, but only if we enabled it
if (!wasSilent) {
// Always restore normal logging in finally block if we enabled silent mode
if (!wasSilent && isSilentMode()) {
disableSilentMode();
}
}
// Verify the report file was created
// --- Result Handling (remains largely the same) ---
// Verify the report file was created (core function writes it)
if (!fs.existsSync(resolvedOutputPath)) {
return {
success: false,
error: {
code: 'ANALYZE_ERROR',
message: 'Analysis completed but no report file was created'
code: 'ANALYZE_REPORT_MISSING', // Specific code
message:
'Analysis completed but no report file was created at the expected path.'
}
};
}
// The core function now returns the report object directly
if (!report || !report.complexityAnalysis) {
log.error(
'Core analyzeTaskComplexity function did not return a valid report object.'
);
return {
success: false,
error: {
code: 'INVALID_CORE_RESPONSE',
message: 'Core analysis function returned an invalid response.'
}
};
}
// Read the report file
let report;
try {
report = JSON.parse(fs.readFileSync(resolvedOutputPath, 'utf8'));
const analysisArray = report.complexityAnalysis; // Already an array
// Important: Handle different report formats
// The core function might return an array or an object with a complexityAnalysis property
const analysisArray = Array.isArray(report)
? report
: report.complexityAnalysis || [];
// Count tasks by complexity
// Count tasks by complexity (remains the same)
const highComplexityTasks = analysisArray.filter(
(t) => t.complexityScore >= 8
).length;
@@ -152,29 +170,33 @@ export async function analyzeTaskComplexityDirect(args, log, context = {}) {
mediumComplexityTasks,
lowComplexityTasks
}
// Include the full report data if needed by the client
// fullReport: report
}
};
} catch (parseError) {
log.error(`Error parsing report file: ${parseError.message}`);
// Should not happen if core function returns object, but good safety check
log.error(`Internal error processing report data: ${parseError.message}`);
return {
success: false,
error: {
code: 'REPORT_PARSE_ERROR',
message: `Error parsing complexity report: ${parseError.message}`
code: 'REPORT_PROCESS_ERROR',
message: `Internal error processing complexity report: ${parseError.message}`
}
};
}
// --- End Result Handling ---
} catch (error) {
// Make sure to restore normal logging even if there's an error
// Catch errors from initial checks or path resolution
// Make sure to restore normal logging if silent mode was enabled
if (isSilentMode()) {
disableSilentMode();
}
log.error(`Error in analyzeTaskComplexityDirect: ${error.message}`);
log.error(`Error in analyzeTaskComplexityDirect setup: ${error.message}`);
return {
success: false,
error: {
code: 'CORE_FUNCTION_ERROR',
code: 'DIRECT_FUNCTION_SETUP_ERROR',
message: error.message
}
};