diff --git a/.changeset/flat-queens-burn.md b/.changeset/flat-queens-burn.md
deleted file mode 100644
index 2c8a6aa1..00000000
--- a/.changeset/flat-queens-burn.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-"task-master-ai": minor
----
-
-Improve analyze-complexity UI with realtime feedback.
diff --git a/scripts/modules/commands.js b/scripts/modules/commands.js
index 30491155..5fbf6327 100644
--- a/scripts/modules/commands.js
+++ b/scripts/modules/commands.js
@@ -41,7 +41,6 @@ import {
   displayNextTask,
   displayTaskById,
   displayComplexityReport,
-  displayComplexityAnalysisStart,
   getStatusWithColor,
   confirmTaskOverwrite
 } from './ui.js';
@@ -465,8 +464,12 @@ function registerCommands(programInstance) {
       const thresholdScore = parseFloat(options.threshold);
       const useResearch = options.research || false;

-      // Call the dedicated UI function to display complexity analysis start information
-      displayComplexityAnalysisStart(tasksPath, outputPath, useResearch, modelOverride || CONFIG.model, CONFIG.temperature);
+      console.log(chalk.blue(`Analyzing task complexity from: ${tasksPath}`));
+      console.log(chalk.blue(`Output report will be saved to: ${outputPath}`));
+
+      if (useResearch) {
+        console.log(chalk.blue('Using Perplexity AI for research-backed complexity analysis'));
+      }

       await analyzeTaskComplexity(options);
     });
diff --git a/scripts/modules/task-manager.js b/scripts/modules/task-manager.js
index dccb8ea4..3df5a44c 100644
--- a/scripts/modules/task-manager.js
+++ b/scripts/modules/task-manager.js
@@ -30,9 +30,7 @@ import {
   getComplexityWithColor,
   startLoadingIndicator,
   stopLoadingIndicator,
-  createProgressBar,
-  displayAnalysisProgress,
-  formatComplexitySummary
+  createProgressBar
 } from './ui.js';

 import {
@@ -302,45 +300,10 @@ Return only the updated tasks as a valid JSON array.`
     });

     // Process the stream
-    let responseText = ''; // Define responseText variable
-    try {
-      let chunkCount = 0;
-      let isProcessing = true;
-      // Add a local check that gets set to false if SIGINT is received
-      const originalSigintHandler = sigintHandler;
-
-      // Enhance the SIGINT handler to set isProcessing to false
-      sigintHandler = () => {
-        isProcessing = false;
-
-        // Call original handler to do the rest of cleanup and exit
-        if (originalSigintHandler) originalSigintHandler();
-      };
-
-      for await (const chunk of stream) {
-        // Check if we should stop processing (SIGINT received)
-        if (!isProcessing) {
-          break;
-        }
-
-        if (chunk.type === 'content_block_delta' && chunk.delta.text) {
-          responseText += chunk.delta.text;
-          chunkCount++;
-        }
+    for await (const chunk of stream) {
+      if (chunk.type === 'content_block_delta' && chunk.delta.text) {
+        responseText += chunk.delta.text;
       }
-
-      // Restore original handler if we didn't get interrupted
-      if (isProcessing) {
-        sigintHandler = originalSigintHandler;
-      }
-    } catch (streamError) {
-      // Clean up the interval even if there's an error
-      if (streamingInterval) {
-        clearInterval(streamingInterval);
-        streamingInterval = null;
-      }
-
-      throw streamError;
     }

     if (streamingInterval) clearInterval(streamingInterval);
@@ -2131,45 +2094,10 @@ async function addTask(tasksPath, prompt, dependencies = [], priority = 'medium'
     }, 500);

     // Process the stream
-    console.log(chalk.yellow('[DEBUG] Starting to process Claude stream'));
-    try {
-      let chunkCount = 0;
-      let isProcessing = true;
-      // Add a local check that gets set to false if SIGINT is received
-      const originalSigintHandler = sigintHandler;
-
-      // Enhance the SIGINT handler to set isProcessing to false
-      sigintHandler = () => {
-        isProcessing = false;
-
-        // Call original handler to do the rest of cleanup and exit
-        if (originalSigintHandler) originalSigintHandler();
-      };
-
-      for await (const chunk of stream) {
-        // Check if we should stop processing (SIGINT received)
-        if (!isProcessing) {
-          break;
-        }
-
-        if (chunk.type === 'content_block_delta' && chunk.delta.text) {
-          fullResponse += chunk.delta.text;
-          chunkCount++;
-        }
+    for await (const chunk of stream) {
+      if (chunk.type === 'content_block_delta' && chunk.delta.text) {
+        fullResponse += chunk.delta.text;
       }
-
-      // Restore original handler if we didn't get interrupted
-      if (isProcessing) {
-        sigintHandler = originalSigintHandler;
-      }
-    } catch (streamError) {
-      // Clean up the interval even if there's an error
-      if (streamingInterval) {
-        clearInterval(streamingInterval);
-        streamingInterval = null;
-      }
-
-      throw streamError;
     }

     if (streamingInterval) clearInterval(streamingInterval);
@@ -2255,67 +2183,22 @@ async function analyzeTaskComplexity(options) {
   const tasksPath = options.file || 'tasks/tasks.json';
   const outputPath = options.output || 'scripts/task-complexity-report.json';
   const modelOverride = options.model;
-
-  // Define streamingInterval at the top level of the function so the handler can access it
-  let streamingInterval = null;
-
-  // Add a debug listener at the process level to see if SIGINT is being received
-  const debugSignalListener = () => {};
-  process.on('SIGINT', debugSignalListener);
-
-  // Set up SIGINT (Control-C) handler to cancel the operation gracefully
-  let sigintHandler;
-  const registerSigintHandler = () => {
-    // Only register if not already registered
-    if (!sigintHandler) {
-      sigintHandler = () => {
-        // Try to clear any intervals before exiting
-        if (streamingInterval) {
-          clearInterval(streamingInterval);
-          streamingInterval = null;
-        }
-
-        // Clear any terminal state
-        process.stdout.write('\r\x1B[K'); // Clear current line
-
-        console.log(chalk.yellow('\n\nAnalysis cancelled by user.'));
-
-        // Make sure we remove our event listeners before exiting
-        cleanupSigintHandler();
-
-        // Force exit after giving time for cleanup
-        setTimeout(() => {
-          process.exit(0);
-        }, 100);
-      };
-      process.on('SIGINT', sigintHandler);
-    }
-  };
-
-  // Clean up function to remove the handler when done
-  const cleanupSigintHandler = () => {
-    if (sigintHandler) {
-      process.removeListener('SIGINT', sigintHandler);
-      sigintHandler = null;
-    }
-
-    // Also remove the debug listener
-    process.removeListener('SIGINT', debugSignalListener);
-  };
   const thresholdScore = parseFloat(options.threshold || '5');
   const useResearch = options.research || false;

-  // Initialize error tracking variable
-  let apiError = false;
+  console.log(chalk.blue(`Analyzing task complexity and generating expansion recommendations...`));

   try {
     // Read tasks.json
+    console.log(chalk.blue(`Reading tasks from ${tasksPath}...`));
     const tasksData = readJSON(tasksPath);

     if (!tasksData || !tasksData.tasks || !Array.isArray(tasksData.tasks) || tasksData.tasks.length === 0) {
       throw new Error('No tasks found in the tasks file');
     }

+    console.log(chalk.blue(`Found ${tasksData.tasks.length} tasks to analyze.`));
+
     // Prepare the prompt for the LLM
     const prompt = generateComplexityAnalysisPrompt(tasksData);
@@ -2323,51 +2206,13 @@ async function analyzeTaskComplexity(options) {
     const loadingIndicator = startLoadingIndicator('Calling AI to analyze task complexity...');

     let fullResponse = '';
+    let streamingInterval = null;

     try {
       // If research flag is set, use Perplexity first
       if (useResearch) {
        try {
-          // Register SIGINT handler to allow cancellation with Control-C
-          registerSigintHandler();
-
-          // Start tracking elapsed time and update information display
-          const startTime = Date.now();
-          const totalTaskCount = tasksData.tasks.length;
-
-          // IMPORTANT: Stop the loading indicator before showing the progress bar
-          stopLoadingIndicator(loadingIndicator);
-
-          // Set up the progress data
-          const progressData = {
-            model: process.env.PERPLEXITY_MODEL || 'sonar-pro',
-            contextTokens: 0,
-            elapsed: 0,
-            temperature: CONFIG.temperature,
-            tasksAnalyzed: 0,
-            totalTasks: totalTaskCount,
-            percentComplete: 0,
-            maxTokens: CONFIG.maxTokens
-          };
-
-          // Estimate context tokens (rough approximation - 1 token ~= 4 chars)
-          const estimatedContextTokens = Math.ceil(prompt.length / 4);
-          progressData.contextTokens = estimatedContextTokens;
-
-          // Display initial progress before API call begins
-          displayAnalysisProgress(progressData);
-
-          // Update progress display at regular intervals
-          streamingInterval = setInterval(() => {
-            // Update elapsed time
-            progressData.elapsed = (Date.now() - startTime) / 1000;
-            progressData.percentComplete = Math.min(90, (progressData.elapsed / 30) * 100); // Estimate based on typical 30s completion
-
-            // Estimate number of tasks analyzed based on percentage
-            progressData.tasksAnalyzed = Math.floor((progressData.percentComplete / 100) * totalTaskCount);
-
-            displayAnalysisProgress(progressData);
-          }, 100);
+          console.log(chalk.blue('Using Perplexity AI for research-backed complexity analysis...'));

           // Modify prompt to include more context for Perplexity and explicitly request JSON
           const researchPrompt = `You are conducting a detailed analysis of software development tasks to determine their complexity and how they should be broken down into subtasks.
@@ -2413,36 +2258,17 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
          fullResponse = result.choices[0].message.content;
           console.log(chalk.green('Successfully generated complexity analysis with Perplexity AI'));

-          // Clean up the interval
-          if (streamingInterval) {
-            clearInterval(streamingInterval);
-            streamingInterval = null;
-          }
-
-          // Show completion
-          progressData.percentComplete = 100;
-          progressData.tasksAnalyzed = progressData.totalTasks;
-          progressData.completed = true;
-          displayAnalysisProgress(progressData);
-
+          if (streamingInterval) clearInterval(streamingInterval);
           stopLoadingIndicator(loadingIndicator);
-
-          // Log the first part of the response for debugging
-          console.debug(chalk.gray('Response first 200 chars:'));
-          console.debug(chalk.gray(fullResponse.substring(0, 200)));
+
+          // ALWAYS log the first part of the response for debugging
+          console.log(chalk.gray('Response first 200 chars:'));
+          console.log(chalk.gray(fullResponse.substring(0, 200)));
         } catch (perplexityError) {
-          console.error(chalk.yellow('Falling back to Claude for complexity analysis...'));
-          console.error(chalk.gray('Perplexity error:'), perplexityError.message);
-
-          // Clean up
-          if (streamingInterval) {
-            clearInterval(streamingInterval);
-            streamingInterval = null;
-          }
-          cleanupSigintHandler();
+          console.log(chalk.yellow('Falling back to Claude for complexity analysis...'));
+          console.log(chalk.gray('Perplexity error:'), perplexityError.message);

           // Continue to Claude as fallback
-          console.log(chalk.yellow('\nFalling back to Claude after Perplexity error: ' + perplexityError.message));
           await useClaudeForComplexityAnalysis();
         }
       } else {
@@ -2452,13 +2278,8 @@ DO NOT include any text before or after the JSON array. No explanations, no mark

       // Helper function to use Claude for complexity analysis
       async function useClaudeForComplexityAnalysis() {
-        // Register SIGINT handler to allow cancellation with Control-C
-        registerSigintHandler();
-
         // Call the LLM API with streaming
-        // Add try-catch for better error handling specifically for API call
-        try {
-          const stream = await anthropic.messages.create({
+        const stream = await anthropic.messages.create({
           max_tokens: CONFIG.maxTokens,
           model: modelOverride || CONFIG.model,
           temperature: CONFIG.temperature,
@@ -2467,54 +2288,13 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
          stream: true
        });

-        // Stop the default loading indicator before showing our custom UI
-        stopLoadingIndicator(loadingIndicator);
-
-        // Start tracking elapsed time and update information display
-        const startTime = Date.now();
-        const totalTaskCount = tasksData.tasks.length;
-
-        // Set up the progress data
-        const progressData = {
-          model: modelOverride || CONFIG.model,
-          contextTokens: 0, // Will estimate based on prompt size
-          elapsed: 0,
-          temperature: CONFIG.temperature,
-          tasksAnalyzed: 0,
-          totalTasks: totalTaskCount,
-          percentComplete: 0,
-          maxTokens: CONFIG.maxTokens
-        };
-
-        // Estimate context tokens (rough approximation - 1 token ~= 4 chars)
-        const estimatedContextTokens = Math.ceil(prompt.length / 4);
-        progressData.contextTokens = estimatedContextTokens;
-
-        // Display initial progress before streaming begins
-        displayAnalysisProgress(progressData);
-
-        // Update progress display at regular intervals
+        // Update loading indicator to show streaming progress
+        let dotCount = 0;
         streamingInterval = setInterval(() => {
-          // Update elapsed time
-          progressData.elapsed = (Date.now() - startTime) / 1000;
-
-          // Estimate completion percentage based on response length
-          if (fullResponse.length > 0) {
-            // Estimate based on expected response size (approx. 500 chars per task)
-            const expectedResponseSize = totalTaskCount * 500;
-            const estimatedProgress = Math.min(95, (fullResponse.length / expectedResponseSize) * 100);
-            progressData.percentComplete = estimatedProgress;
-
-            // Estimate analyzed tasks based on JSON objects found
-            const taskMatches = fullResponse.match(/"taskId"\s*:\s*\d+/g);
-            if (taskMatches) {
-              progressData.tasksAnalyzed = Math.min(totalTaskCount, taskMatches.length);
-            }
-          }
-
-          // Display the progress information
-          displayAnalysisProgress(progressData);
-        }, 100); // Update much more frequently for smoother animation
+          readline.cursorTo(process.stdout, 0);
+          process.stdout.write(`Receiving streaming response from Claude${'.'.repeat(dotCount)}`);
+          dotCount = (dotCount + 1) % 4;
+        }, 500);

         // Process the stream
         for await (const chunk of stream) {
@@ -2523,48 +2303,14 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
          }
        }

-        // Clean up the interval - stop updating progress
-        if (streamingInterval) {
-          clearInterval(streamingInterval);
-          streamingInterval = null;
-        }
+        clearInterval(streamingInterval);
+        stopLoadingIndicator(loadingIndicator);

-        // Show completion message immediately
-        progressData.percentComplete = 100;
-        progressData.elapsed = (Date.now() - startTime) / 1000;
-        progressData.tasksAnalyzed = progressData.totalTasks;
-        progressData.completed = true;
-        progressData.contextTokens = Math.max(progressData.contextTokens, estimatedContextTokens); // Ensure the final token count is accurate
-        displayAnalysisProgress(progressData);
-
-        // Clear the line completely to remove any artifacts (after showing completion)
-        process.stdout.write('\r\x1B[K'); // Clear current line
-        process.stdout.write('\r'); // Move cursor to beginning of line
-      } catch (apiError) {
-        // Handle specific API errors here
-        if (streamingInterval) clearInterval(streamingInterval);
-        process.stdout.write('\r\x1B[K'); // Clear current line
-
-        console.error(chalk.red(`\nAPI Error: ${apiError.message || 'Unknown error'}\n`));
-        console.log(chalk.yellow('This might be a temporary issue with the Claude API.'));
-        console.log(chalk.yellow('Please try again in a few moments or check your API key.'));
-
-        // Rethrow to be caught by outer handler
-        throw apiError;
-      } finally {
-        // Clean up SIGINT handler
-        cleanupSigintHandler();
-
-        // Ensure the interval is cleared
-        if (streamingInterval) {
-          clearInterval(streamingInterval);
-          streamingInterval = null;
-        }
-      }
+        console.log(chalk.green("Completed streaming response from Claude API!"));
       }

       // Parse the JSON response
-      console.log(chalk.blue(` Parsing complexity analysis...`));
+      console.log(chalk.blue(`Parsing complexity analysis...`));
       let complexityAnalysis;
       try {
         // Clean up the response to ensure it's valid JSON
@@ -2574,14 +2320,14 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
         const codeBlockMatch = fullResponse.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
         if (codeBlockMatch) {
           cleanedResponse = codeBlockMatch[1];
-          console.debug(chalk.blue("Extracted JSON from code block"));
+          console.log(chalk.blue("Extracted JSON from code block"));
         } else {
           // Look for a complete JSON array pattern
           // This regex looks for an array of objects starting with [ and ending with ]
           const jsonArrayMatch = fullResponse.match(/(\[\s*\{\s*"[^"]*"\s*:[\s\S]*\}\s*\])/);
           if (jsonArrayMatch) {
             cleanedResponse = jsonArrayMatch[1];
-            console.log(chalk.blue(" Extracted JSON array pattern"));
+            console.log(chalk.blue("Extracted JSON array pattern"));
           } else {
             // Try to find the start of a JSON array and capture to the end
             const jsonStartMatch = fullResponse.match(/(\[\s*\{[\s\S]*)/);
@@ -2596,19 +2342,19 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
            }
          }
        }
-
+
         // Log the cleaned response for debugging
-        console.debug(chalk.gray("Attempting to parse cleaned JSON..."));
-        console.debug(chalk.gray("Cleaned response (first 100 chars):"));
-        console.debug(chalk.gray(cleanedResponse.substring(0, 100)));
-        console.debug(chalk.gray("Last 100 chars:"));
-        console.debug(chalk.gray(cleanedResponse.substring(cleanedResponse.length - 100)));
+        console.log(chalk.gray("Attempting to parse cleaned JSON..."));
+        console.log(chalk.gray("Cleaned response (first 100 chars):"));
+        console.log(chalk.gray(cleanedResponse.substring(0, 100)));
+        console.log(chalk.gray("Last 100 chars:"));
+        console.log(chalk.gray(cleanedResponse.substring(cleanedResponse.length - 100)));

         // More aggressive cleaning - strip any non-JSON content at the beginning or end
         const strictArrayMatch = cleanedResponse.match(/(\[\s*\{[\s\S]*\}\s*\])/);
         if (strictArrayMatch) {
           cleanedResponse = strictArrayMatch[1];
-          console.debug(chalk.blue("Applied strict JSON array extraction"));
+          console.log(chalk.blue("Applied strict JSON array extraction"));
         }

         try {
@@ -2724,45 +2470,6 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
          // Use the same AI model as the original analysis
          if (useResearch) {
-            // Register SIGINT handler again to make sure it's active for this phase
-            registerSigintHandler();
-
-            // Start tracking elapsed time for missing tasks
-            const missingTasksStartTime = Date.now();
-
-            // Stop the loading indicator before showing progress
-            stopLoadingIndicator(missingTasksLoadingIndicator);
-
-            // Set up progress tracking for missing tasks
-            const missingProgressData = {
-              model: process.env.PERPLEXITY_MODEL || 'sonar-pro',
-              contextTokens: 0,
-              elapsed: 0,
-              temperature: CONFIG.temperature,
-              tasksAnalyzed: 0,
-              totalTasks: missingTaskIds.length,
-              percentComplete: 0,
-              maxTokens: CONFIG.maxTokens
-            };
-
-            // Estimate context tokens
-            const estimatedMissingContextTokens = Math.ceil(missingTasksPrompt.length / 4);
-            missingProgressData.contextTokens = estimatedMissingContextTokens;
-
-            // Display initial progress
-            displayAnalysisProgress(missingProgressData);
-
-            // Update progress display regularly
-            const missingTasksInterval = setInterval(() => {
-              missingProgressData.elapsed = (Date.now() - missingTasksStartTime) / 1000;
-              missingProgressData.percentComplete = Math.min(90, (missingProgressData.elapsed / 20) * 100); // Estimate ~20s completion
-
-              // Estimate number of tasks analyzed based on percentage
-              missingProgressData.tasksAnalyzed = Math.floor((missingProgressData.percentComplete / 100) * missingTaskIds.length);
-
-              displayAnalysisProgress(missingProgressData);
-            }, 100);
-
             // Create the same research prompt but for missing tasks
             const missingTasksResearchPrompt = `You are conducting a detailed analysis of software development tasks to determine their complexity and how they should be broken down into subtasks.
@@ -2787,45 +2494,24 @@ Your response must be a clean JSON array only, following exactly this format:

 DO NOT include any text before or after the JSON array. No explanations, no markdown formatting.`;

-            try {
-              const result = await perplexity.chat.completions.create({
-                model: process.env.PERPLEXITY_MODEL || 'sonar-pro',
-                messages: [
-                  {
-                    role: "system",
-                    content: "You are a technical analysis AI that only responds with clean, valid JSON. Never include explanatory text or markdown formatting in your response."
-                  },
-                  {
-                    role: "user",
-                    content: missingTasksResearchPrompt
-                  }
-                ],
-                temperature: CONFIG.temperature,
-                max_tokens: CONFIG.maxTokens,
-              });
-
-              // Extract the response
-              missingAnalysisResponse = result.choices[0].message.content;
-
-              // Stop interval and show completion
-              clearInterval(missingTasksInterval);
-              missingProgressData.percentComplete = 100;
-              missingProgressData.tasksAnalyzed = missingProgressData.totalTasks;
-              missingProgressData.completed = true;
-              displayAnalysisProgress(missingProgressData);
-            } catch (error) {
-              // Clean up on error
-              if (missingTasksInterval) {
-                clearInterval(missingTasksInterval);
-              }
-              throw error;
-            } finally {
-              // Always clean up SIGINT handler and interval
-              cleanupSigintHandler();
-              if (missingTasksInterval) {
-                clearInterval(missingTasksInterval);
-              }
-            }
+            const result = await perplexity.chat.completions.create({
+              model: process.env.PERPLEXITY_MODEL || 'sonar-pro',
+              messages: [
+                {
+                  role: "system",
+                  content: "You are a technical analysis AI that only responds with clean, valid JSON. Never include explanatory text or markdown formatting in your response."
+                },
+                {
+                  role: "user",
+                  content: missingTasksResearchPrompt
+                }
+              ],
+              temperature: CONFIG.temperature,
+              max_tokens: CONFIG.maxTokens,
+            });
+
+            // Extract the response
+            missingAnalysisResponse = result.choices[0].message.content;
           } else {
             // Use Claude
             const stream = await anthropic.messages.create({
@@ -2861,7 +2547,7 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
             const codeBlockMatch = missingAnalysisResponse.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
             if (codeBlockMatch) {
               cleanedResponse = codeBlockMatch[1];
-              console.debug(chalk.blue("Extracted JSON from code block for missing tasks"));
+              console.log(chalk.blue("Extracted JSON from code block for missing tasks"));
             } else {
               // Look for a complete JSON array pattern
               const jsonArrayMatch = missingAnalysisResponse.match(/(\[\s*\{\s*"[^"]*"\s*:[\s\S]*\}\s*\])/);
@@ -2983,10 +2669,10 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
     };

     // Write the report to file
-    console.log(chalk.blue(` Writing complexity report to ${outputPath}...`));
+    console.log(chalk.blue(`Writing complexity report to ${outputPath}...`));
     writeJSON(outputPath, report);

-    console.log(chalk.green(` Task complexity analysis complete. Report written to ${outputPath}`));
+    console.log(chalk.green(`Task complexity analysis complete. Report written to ${outputPath}`));

     // Display a summary of findings
     const highComplexity = complexityAnalysis.filter(t => t.complexityScore >= 8).length;
@@ -2994,43 +2680,24 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
     const lowComplexity = complexityAnalysis.filter(t => t.complexityScore < 5).length;
     const totalAnalyzed = complexityAnalysis.length;

-    // Only show summary if we didn't encounter an API error
-    if (!apiError) {
-      // Create a summary object for formatting
-      const summary = {
-        totalTasks: tasksData.tasks.length,
-        analyzedTasks: totalAnalyzed,
-        highComplexityCount: highComplexity,
-        mediumComplexityCount: mediumComplexity,
-        lowComplexityCount: lowComplexity,
-        researchBacked: useResearch
-      };
-
-      // Use the new formatting function from UI module
-      console.log(formatComplexitySummary(summary));
-    }
+    console.log('\nComplexity Analysis Summary:');
+    console.log('----------------------------');
+    console.log(`Tasks in input file: ${tasksData.tasks.length}`);
+    console.log(`Tasks successfully analyzed: ${totalAnalyzed}`);
+    console.log(`High complexity tasks: ${highComplexity}`);
+    console.log(`Medium complexity tasks: ${mediumComplexity}`);
+    console.log(`Low complexity tasks: ${lowComplexity}`);
+    console.log(`Sum verification: ${highComplexity + mediumComplexity + lowComplexity} (should equal ${totalAnalyzed})`);
+    console.log(`Research-backed analysis: ${useResearch ? 'Yes' : 'No'}`);
+    console.log(`\nSee ${outputPath} for the full report and expansion commands.`);
   } catch (error) {
     if (streamingInterval) clearInterval(streamingInterval);
     stopLoadingIndicator(loadingIndicator);
-
-    // Mark that we encountered an API error
-    apiError = true;
-
-    // Display a user-friendly error message
-    console.error(chalk.red(`\nAPI Error: ${error.message || 'Unknown error'}\n`));
-    console.log(chalk.yellow('This might be a temporary issue with the Claude API.'));
-    console.log(chalk.yellow('Please try again in a few moments.'));
-    cleanupSigintHandler();
-
-    // We'll continue with any tasks we might have analyzed before the error
+    throw error;
   }
 } catch (error) {
   console.error(chalk.red(`Error analyzing task complexity: ${error.message}`));
-
-  // Clean up SIGINT handler
-  cleanupSigintHandler();
-
   process.exit(1);
 }
}
diff --git a/scripts/modules/ui.js b/scripts/modules/ui.js
index 5bcdbe0f..ccfd0649 100644
--- a/scripts/modules/ui.js
+++ b/scripts/modules/ui.js
@@ -1061,255 +1061,6 @@ async function displayComplexityReport(reportPath) {
   ));
 }

-/**
- * Display real-time analysis progress with detailed information in a single line format
- * @param {Object} progressData - Object containing progress information
- * @param {string} progressData.model - Model name (e.g., 'claude-3-7-sonnet-20250219')
- * @param {number} progressData.contextTokens - Context tokens used
- * @param {number} progressData.elapsed - Elapsed time in seconds
- * @param {number} progressData.temperature - Temperature setting
- * @param {number} progressData.tasksAnalyzed - Number of tasks analyzed so far
- * @param {number} progressData.totalTasks - Total number of tasks to analyze
- * @param {number} progressData.percentComplete - Percentage complete (0-100)
- * @param {number} progressData.maxTokens - Maximum tokens setting
- * @param {boolean} progressData.completed - Whether the process is completed
- * @returns {void}
- */
-function displayAnalysisProgress(progressData) {
-  const {
-    model,
-    contextTokens = 0,
-    elapsed = 0,
-    temperature = 0.7,
-    tasksAnalyzed = 0,
-    totalTasks = 0,
-    percentComplete = 0,
-    maxTokens = 0,
-    completed = false
-  } = progressData;
-
-  // Format the elapsed time
-  const timeDisplay = formatElapsedTime(elapsed);
-
-  // Use static variables to track display state
-  if (displayAnalysisProgress.initialized === undefined) {
-    displayAnalysisProgress.initialized = false;
-    displayAnalysisProgress.lastUpdate = Date.now();
-    displayAnalysisProgress.statusLineStarted = false;
-  }
-
-  // Create progress bar (20 characters wide)
-  const progressBarWidth = 20;
-  const percentText = `${Math.round(percentComplete)}%`;
-  const percentTextLength = percentText.length;
-
-  // Calculate expected total tokens and current progress
-  const totalTokens = contextTokens; // Use the actual token count as the total
-
-  // Calculate current tokens based on percentage complete to show gradual increase from 0 to totalTokens
-  const currentTokens = completed ? totalTokens : Math.min(totalTokens, Math.round((percentComplete / 100) * totalTokens));
-
-  // Format token counts with proper padding
-  const totalTokenDigits = totalTokens.toString().length;
-  const currentTokensFormatted = currentTokens.toString().padStart(totalTokenDigits, '0');
-  const tokenDisplay = `${currentTokensFormatted}/${totalTokens}`;
-
-  // Calculate position for centered percentage
-  const halfBarWidth = Math.floor(progressBarWidth / 2);
-  const percentStartPos = Math.max(0, halfBarWidth - Math.floor(percentTextLength / 2));
-
-  // Calculate how many filled and empty chars to draw
-  const filledChars = Math.floor((percentComplete / 100) * progressBarWidth);
-
-  // Create the progress bar with centered percentage (without gradient)
-  let progressBar = '';
-  for (let i = 0; i < progressBarWidth; i++) {
-    // If we're at the start position for the percentage text
-    if (i === percentStartPos) {
-      // Apply bold white for percentage text to stand out
-      progressBar += chalk.bold.white(percentText);
-      // Skip ahead by the length of the percentage text
-      i += percentTextLength - 1;
-    } else if (i < filledChars) {
-      // Use a single color instead of gradient
-      progressBar += chalk.cyan('█');
-    } else {
-      // Use a subtle character for empty space
-      progressBar += chalk.gray('░');
-    }
-  }
-
-  // Use spinner from ora - these are the actual frames used in the default spinner
-  const spinnerFrames = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'];
-
-  // Increment the counter faster to speed up the animation
-  if (!displayAnalysisProgress.frameCounter) {
-    displayAnalysisProgress.frameCounter = 0;
-  }
-  if (!displayAnalysisProgress.updateToggle) {
-    displayAnalysisProgress.updateToggle = false;
-  }
-
-  // Toggle between updating and not updating to halve the speed
-  displayAnalysisProgress.updateToggle = !displayAnalysisProgress.updateToggle;
-
-  // Only update every other call to make animation half as fast
-  if (displayAnalysisProgress.updateToggle) {
-    displayAnalysisProgress.frameCounter = (displayAnalysisProgress.frameCounter + 1) % spinnerFrames.length;
-  }
-
-  const spinner = chalk.cyan(spinnerFrames[displayAnalysisProgress.frameCounter]);
-
-  // Format status line based on whether we're complete or not
-  let statusLine;
-
-  if (completed) {
-    // For completed progress, show checkmark and "Complete" text
-    statusLine =
-      ` ${chalk.cyan('⏱')} ${timeDisplay} ${chalk.gray('|')} ` +
-      `Tasks: ${chalk.bold(tasksAnalyzed)}/${totalTasks} ${chalk.gray('|')} ` +
-      `Tokens: ${tokenDisplay} ${chalk.gray('|')} ` +
-      `${progressBar} ${chalk.gray('|')} ` +
-      `${chalk.green('✅')} ${chalk.green('Complete')}`;
-  } else {
-    // For in-progress, show spinner and "Processing" text
-    statusLine =
-      ` ${chalk.cyan('⏱')} ${timeDisplay} ${chalk.gray('|')} ` +
-      `Tasks: ${chalk.bold(tasksAnalyzed)}/${totalTasks} ${chalk.gray('|')} ` +
-      `Tokens: ${tokenDisplay} ${chalk.gray('|')} ` +
-      `${progressBar} ${chalk.gray('|')} ` +
-      `${chalk.cyan('Processing')} ${spinner}`;
-  }
-
-
-
-  // Clear the line and update the status
-  process.stdout.write('\r\x1B[K');
-  process.stdout.write(statusLine);
-
-  // Additional handling for completion
-  if (completed) {
-    // Move to next line and print completion message in a box
-    process.stdout.write('\n\n');
-
-    console.log(boxen(
-      chalk.green(`Task complexity analysis completed in ${timeDisplay}`) + '\n' +
-      chalk.green(`✅ Analyzed ${tasksAnalyzed} tasks successfully.`),
-      {
-        padding: { top: 1, bottom: 1, left: 2, right: 2 },
-        margin: { top: 0, bottom: 1 },
-        borderColor: 'green',
-        borderStyle: 'round'
-      }
-    ));
-
-    // Reset initialization state for next run
-    displayAnalysisProgress.initialized = undefined;
-    displayAnalysisProgress.statusLineStarted = false;
-  }
-}
-
-/**
- * Format elapsed time in the format shown in the screenshot (0m 00s)
- * @param {number} seconds - Elapsed time in seconds
- * @returns {string} Formatted time string
- */
-function formatElapsedTime(seconds) {
-  const minutes = Math.floor(seconds / 60);
-  const remainingSeconds = Math.floor(seconds % 60);
-  return `${minutes}m ${remainingSeconds.toString().padStart(2, '0')}s`;
-}
-
-/**
- * Format a complexity summary from analyze-complexity with a neat boxed display
- * @param {Object} summary The complexity analysis summary
- * @returns {string} The formatted summary
- */
-function formatComplexitySummary(summary) {
-  // Calculate verification sum
-  const sumTotal = summary.highComplexityCount + summary.mediumComplexityCount + summary.lowComplexityCount;
-  const verificationStatus = sumTotal === summary.analyzedTasks ? chalk.green('✅') : chalk.red('✗');
-
-  // Create a table for better alignment
-  const table = new Table({
-    chars: {
-      'top': '', 'top-mid': '', 'top-left': '', 'top-right': '',
-      'bottom': '', 'bottom-mid': '', 'bottom-left': '', 'bottom-right': '',
-      'left': '', 'left-mid': '', 'mid': '', 'mid-mid': '',
-      'right': '', 'right-mid': '', 'middle': ' '
-    },
-    style: { border: [], 'padding-left': 2 },
-    colWidths: [28, 50]
-  });
-
-  // Basic info
-  table.push(
-    [chalk.cyan('Tasks in input file:'), chalk.bold(summary.totalTasks)],
-    [chalk.cyan('Tasks analyzed:'), chalk.bold(summary.analyzedTasks)]
-  );
-
-  // Complexity distribution in one row
-  const percentHigh = Math.round((summary.highComplexityCount / summary.analyzedTasks) * 100);
-  const percentMed = Math.round((summary.mediumComplexityCount / summary.analyzedTasks) * 100);
-  const percentLow = Math.round((summary.lowComplexityCount / summary.analyzedTasks) * 100);
-
-  const complexityRow = [
-    chalk.cyan('Complexity distribution:'),
-    `${chalk.hex('#CC0000').bold(summary.highComplexityCount)} ${chalk.hex('#CC0000')('High')} (${percentHigh}%) · ` +
-    `${chalk.hex('#FF8800').bold(summary.mediumComplexityCount)} ${chalk.hex('#FF8800')('Medium')} (${percentMed}%) · ` +
-    `${chalk.yellow.bold(summary.lowComplexityCount)} ${chalk.yellow('Low')} (${percentLow}%)`
-  ];
-  table.push(complexityRow);
-
-  // Visual bar representation of complexity distribution
-  const barWidth = 40; // Total width of the bar
-
-  // Only show bars for categories with at least 1 task
-  const highChars = summary.highComplexityCount > 0 ?
-    Math.max(1, Math.round((summary.highComplexityCount / summary.analyzedTasks) * barWidth)) : 0;
-
-  const medChars = summary.mediumComplexityCount > 0 ?
-    Math.max(1, Math.round((summary.mediumComplexityCount / summary.analyzedTasks) * barWidth)) : 0;
-
-  const lowChars = summary.lowComplexityCount > 0 ?
-    Math.max(1, Math.round((summary.lowComplexityCount / summary.analyzedTasks) * barWidth)) : 0;
-
-  // Adjust bar width if some categories have 0 tasks
-  const actualBarWidth = highChars + medChars + lowChars;
-
-  const distributionBar =
-    chalk.hex('#CC0000')('█'.repeat(highChars)) +
-    chalk.hex('#FF8800')('█'.repeat(medChars)) +
-    chalk.yellow('█'.repeat(lowChars)) +
-    // Add empty space if actual bar is shorter than expected
-    (actualBarWidth < barWidth ? chalk.gray('░'.repeat(barWidth - actualBarWidth)) : '');
-
-  table.push([chalk.cyan('Distribution:'), distributionBar]);
-
-  // Add verification and research status
-  table.push(
-    [chalk.cyan('Verification:'), `${verificationStatus} ${sumTotal}/${summary.analyzedTasks}`],
-    [chalk.cyan('Research-backed:'), summary.researchBacked ? chalk.green('✅') : 'No']
-  );
-
-  // Final string output with title and footer
-  const output = [
-    chalk.bold.underline('Complexity Analysis Summary'),
-    '',
-    table.toString(),
-    '',
-    `Report saved to: ${chalk.italic('scripts/task-complexity-report.json')}`
-  ].join('\n');
-
-  // Return a boxed version
-  return boxen(output, {
-    padding: { top: 1, right: 1, bottom: 1, left: 1 },
-    borderColor: 'blue',
-    borderStyle: 'round',
-    margin: { top: 1, right: 1, bottom: 1, left: 0 }
-  });
-}
-
 /**
  * Confirm overwriting existing tasks.json file
  * @param {string} tasksPath - Path to the tasks.json file
@@ -1337,38 +1088,6 @@ async function confirmTaskOverwrite(tasksPath) {
   return answer.toLowerCase() === 'y' || answer.toLowerCase() === 'yes';
 }

-/**
- * Display the start of complexity analysis with a boxen announcement
- * @param {string} tasksPath - Path to the tasks file being analyzed
- * @param {string} outputPath - Path where the report will be saved
- * @param {boolean} useResearch - Whether Perplexity AI research is enabled
- * @param {string} model - AI model name
- * @param {number} temperature - AI temperature setting
- */
-function displayComplexityAnalysisStart(tasksPath, outputPath, useResearch = false, model = CONFIG.model, temperature = CONFIG.temperature) {
-  // Create the message content with all information
-  let message = chalk.bold(`🤖 Analyzing Task Complexity`) + '\n' +
-    chalk.dim(`Model: ${model} | Temperature: ${temperature}`) + '\n\n' +
-    chalk.blue(`Input: ${tasksPath}`) + '\n' +
-    chalk.blue(`Output: ${outputPath}`);
-
-  // Add research info if enabled
-  if (useResearch) {
-    message += '\n' + chalk.blue('Using Perplexity AI for research-backed analysis');
-  }
-
-  // Display everything in a single boxen
-  console.log(boxen(
-    message,
-    {
-      padding: { top: 1, bottom: 1, left: 2, right: 2 },
-      margin: { top: 0, bottom: 0 },
-      borderColor: 'blue',
-      borderStyle: 'round'
-    }
-  ));
-}
-
 // Export UI functions
 export {
   displayBanner,
@@ -1381,9 +1100,6 @@ export {
   getComplexityWithColor,
   displayNextTask,
   displayTaskById,
-  displayComplexityAnalysisStart,
   displayComplexityReport,
-  displayAnalysisProgress,
-  formatComplexitySummary,
   confirmTaskOverwrite
 };
\ No newline at end of file