Compare commits

6 Commits: revert-70-... → crunchyman

| Author | SHA1 | Date |
|---|---|---|
| | e350c315e3 | |
| | c4cf104a30 | |
| | 37278292bc | |
| | 31f98c6521 | |
| | b892d9743a | |
| | 9dc5e75760 | |
.changeset/fifty-squids-wear.md (new file, 5 lines)

@@ -0,0 +1,5 @@
---
"task-master-ai": patch
---

Add CI for testing

(deleted changeset file)

@@ -1,5 +0,0 @@
---
"task-master-ai": minor
---

Improve analyze-complexity UI with realtime feedback.
.github/workflows/ci.yml (new file, vendored, 61 lines)

@@ -0,0 +1,61 @@
name: CI

on:
  push:
    branches:
      - main
      - next
  pull_request:
    branches:
      - main
      - next

permissions:
  contents: read

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: "npm"

      - name: Cache node_modules
        uses: actions/cache@v4
        with:
          path: |
            node_modules
            */*/node_modules
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-

      - name: Install Dependencies
        run: npm ci
        timeout-minutes: 2

      - name: Run Tests
        run: |
          npm run test:coverage -- --coverageThreshold '{"global":{"branches":0,"functions":0,"lines":0,"statements":0}}' --detectOpenHandles --forceExit
        env:
          NODE_ENV: test
          CI: true
          FORCE_COLOR: 1
        timeout-minutes: 15

      - name: Upload Test Results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-results-node
          path: |
            test-results
            coverage
            junit.xml
          retention-days: 30
.github/workflows/release.yml (vendored, 14 changed lines)

@@ -14,9 +14,21 @@ jobs:
      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: "npm"

      - name: Cache node_modules
        uses: actions/cache@v4
        with:
          path: |
            node_modules
            */*/node_modules
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-

      - name: Install Dependencies
-       run: npm install
+       run: npm ci
        timeout-minutes: 2

      - name: Create Release Pull Request or Publish to npm
        uses: changesets/action@v1
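The release.yml hunk above is truncated right after `uses: changesets/action@v1`. For context, a typical configuration of that step, as documented for changesets/action, looks roughly like the sketch below; the `npm run release` script name and the `NPM_TOKEN` secret are assumptions, not values taken from this repository.

```yaml
# Hypothetical sketch of a typical changesets/action step; the publish script
# name and secret names are assumptions, not this repository's configuration.
- name: Create Release Pull Request or Publish to npm
  uses: changesets/action@v1
  with:
    publish: npm run release
  env:
    GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
```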
README.md

@@ -1,5 +1,9 @@
# Task Master

[](https://github.com/eyaltoledano/claude-task-master/actions/workflows/ci.yml)
[](LICENSE)
[](https://badge.fury.io/js/task-master-ai)

### by [@eyaltoledano](https://x.com/eyaltoledano)

A task management system for AI-driven development with Claude, designed to work seamlessly with Cursor AI.
@@ -41,7 +41,6 @@ import {
displayNextTask,
displayTaskById,
displayComplexityReport,
displayComplexityAnalysisStart,
getStatusWithColor,
confirmTaskOverwrite
} from './ui.js';

@@ -465,8 +464,12 @@ function registerCommands(programInstance) {
const thresholdScore = parseFloat(options.threshold);
const useResearch = options.research || false;

// Call the dedicated UI function to display complexity analysis start information
displayComplexityAnalysisStart(tasksPath, outputPath, useResearch, modelOverride || CONFIG.model, CONFIG.temperature);
console.log(chalk.blue(`Analyzing task complexity from: ${tasksPath}`));
console.log(chalk.blue(`Output report will be saved to: ${outputPath}`));

if (useResearch) {
console.log(chalk.blue('Using Perplexity AI for research-backed complexity analysis'));
}

await analyzeTaskComplexity(options);
});
@@ -30,9 +30,7 @@ import {
getComplexityWithColor,
startLoadingIndicator,
stopLoadingIndicator,
createProgressBar,
displayAnalysisProgress,
formatComplexitySummary
createProgressBar
} from './ui.js';

import {

@@ -302,45 +300,10 @@ Return only the updated tasks as a valid JSON array.`
});

// Process the stream
let responseText = ''; // Define responseText variable
try {
let chunkCount = 0;
let isProcessing = true;
// Add a local check that gets set to false if SIGINT is received
const originalSigintHandler = sigintHandler;

// Enhance the SIGINT handler to set isProcessing to false
sigintHandler = () => {
isProcessing = false;

// Call original handler to do the rest of cleanup and exit
if (originalSigintHandler) originalSigintHandler();
};

for await (const chunk of stream) {
// Check if we should stop processing (SIGINT received)
if (!isProcessing) {
break;
}

if (chunk.type === 'content_block_delta' && chunk.delta.text) {
responseText += chunk.delta.text;
chunkCount++;
}
for await (const chunk of stream) {
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
responseText += chunk.delta.text;
}

// Restore original handler if we didn't get interrupted
if (isProcessing) {
sigintHandler = originalSigintHandler;
}
} catch (streamError) {
// Clean up the interval even if there's an error
if (streamingInterval) {
clearInterval(streamingInterval);
streamingInterval = null;
}

throw streamError;
}

if (streamingInterval) clearInterval(streamingInterval);
@@ -2131,45 +2094,10 @@ async function addTask(tasksPath, prompt, dependencies = [], priority = 'medium'
}, 500);

// Process the stream
console.log(chalk.yellow('[DEBUG] Starting to process Claude stream'));
try {
let chunkCount = 0;
let isProcessing = true;
// Add a local check that gets set to false if SIGINT is received
const originalSigintHandler = sigintHandler;

// Enhance the SIGINT handler to set isProcessing to false
sigintHandler = () => {
isProcessing = false;

// Call original handler to do the rest of cleanup and exit
if (originalSigintHandler) originalSigintHandler();
};

for await (const chunk of stream) {
// Check if we should stop processing (SIGINT received)
if (!isProcessing) {
break;
}

if (chunk.type === 'content_block_delta' && chunk.delta.text) {
fullResponse += chunk.delta.text;
chunkCount++;
}
for await (const chunk of stream) {
if (chunk.type === 'content_block_delta' && chunk.delta.text) {
fullResponse += chunk.delta.text;
}

// Restore original handler if we didn't get interrupted
if (isProcessing) {
sigintHandler = originalSigintHandler;
}
} catch (streamError) {
// Clean up the interval even if there's an error
if (streamingInterval) {
clearInterval(streamingInterval);
streamingInterval = null;
}

throw streamError;
}

if (streamingInterval) clearInterval(streamingInterval);
@@ -2255,67 +2183,22 @@ async function analyzeTaskComplexity(options) {
const tasksPath = options.file || 'tasks/tasks.json';
const outputPath = options.output || 'scripts/task-complexity-report.json';
const modelOverride = options.model;

// Define streamingInterval at the top level of the function so the handler can access it
let streamingInterval = null;

// Add a debug listener at the process level to see if SIGINT is being received
const debugSignalListener = () => {};
process.on('SIGINT', debugSignalListener);

// Set up SIGINT (Control-C) handler to cancel the operation gracefully
let sigintHandler;
const registerSigintHandler = () => {
// Only register if not already registered
if (!sigintHandler) {
sigintHandler = () => {
// Try to clear any intervals before exiting
if (streamingInterval) {
clearInterval(streamingInterval);
streamingInterval = null;
}

// Clear any terminal state
process.stdout.write('\r\x1B[K'); // Clear current line

console.log(chalk.yellow('\n\nAnalysis cancelled by user.'));

// Make sure we remove our event listeners before exiting
cleanupSigintHandler();

// Force exit after giving time for cleanup
setTimeout(() => {
process.exit(0);
}, 100);
};
process.on('SIGINT', sigintHandler);
}
};

// Clean up function to remove the handler when done
const cleanupSigintHandler = () => {
if (sigintHandler) {
process.removeListener('SIGINT', sigintHandler);
sigintHandler = null;
}

// Also remove the debug listener
process.removeListener('SIGINT', debugSignalListener);
};
const thresholdScore = parseFloat(options.threshold || '5');
const useResearch = options.research || false;

// Initialize error tracking variable
let apiError = false;
console.log(chalk.blue(`Analyzing task complexity and generating expansion recommendations...`));

try {
// Read tasks.json
console.log(chalk.blue(`Reading tasks from ${tasksPath}...`));
const tasksData = readJSON(tasksPath);

if (!tasksData || !tasksData.tasks || !Array.isArray(tasksData.tasks) || tasksData.tasks.length === 0) {
throw new Error('No tasks found in the tasks file');
}

console.log(chalk.blue(`Found ${tasksData.tasks.length} tasks to analyze.`));

// Prepare the prompt for the LLM
const prompt = generateComplexityAnalysisPrompt(tasksData);
@@ -2323,51 +2206,13 @@ async function analyzeTaskComplexity(options) {
const loadingIndicator = startLoadingIndicator('Calling AI to analyze task complexity...');

let fullResponse = '';
let streamingInterval = null;

try {
// If research flag is set, use Perplexity first
if (useResearch) {
try {
// Register SIGINT handler to allow cancellation with Control-C
registerSigintHandler();

// Start tracking elapsed time and update information display
const startTime = Date.now();
const totalTaskCount = tasksData.tasks.length;

// IMPORTANT: Stop the loading indicator before showing the progress bar
stopLoadingIndicator(loadingIndicator);

// Set up the progress data
const progressData = {
model: process.env.PERPLEXITY_MODEL || 'sonar-pro',
contextTokens: 0,
elapsed: 0,
temperature: CONFIG.temperature,
tasksAnalyzed: 0,
totalTasks: totalTaskCount,
percentComplete: 0,
maxTokens: CONFIG.maxTokens
};

// Estimate context tokens (rough approximation - 1 token ~= 4 chars)
const estimatedContextTokens = Math.ceil(prompt.length / 4);
progressData.contextTokens = estimatedContextTokens;

// Display initial progress before API call begins
displayAnalysisProgress(progressData);

// Update progress display at regular intervals
streamingInterval = setInterval(() => {
// Update elapsed time
progressData.elapsed = (Date.now() - startTime) / 1000;
progressData.percentComplete = Math.min(90, (progressData.elapsed / 30) * 100); // Estimate based on typical 30s completion

// Estimate number of tasks analyzed based on percentage
progressData.tasksAnalyzed = Math.floor((progressData.percentComplete / 100) * totalTaskCount);

displayAnalysisProgress(progressData);
}, 100);
console.log(chalk.blue('Using Perplexity AI for research-backed complexity analysis...'));

// Modify prompt to include more context for Perplexity and explicitly request JSON
const researchPrompt = `You are conducting a detailed analysis of software development tasks to determine their complexity and how they should be broken down into subtasks.
@@ -2413,36 +2258,17 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
fullResponse = result.choices[0].message.content;
console.log(chalk.green('Successfully generated complexity analysis with Perplexity AI'));

// Clean up the interval
if (streamingInterval) {
clearInterval(streamingInterval);
streamingInterval = null;
}

// Show completion
progressData.percentComplete = 100;
progressData.tasksAnalyzed = progressData.totalTasks;
progressData.completed = true;
displayAnalysisProgress(progressData);

if (streamingInterval) clearInterval(streamingInterval);
stopLoadingIndicator(loadingIndicator);

// Log the first part of the response for debugging
console.debug(chalk.gray('Response first 200 chars:'));
console.debug(chalk.gray(fullResponse.substring(0, 200)));

// ALWAYS log the first part of the response for debugging
console.log(chalk.gray('Response first 200 chars:'));
console.log(chalk.gray(fullResponse.substring(0, 200)));
} catch (perplexityError) {
console.error(chalk.yellow('Falling back to Claude for complexity analysis...'));
console.error(chalk.gray('Perplexity error:'), perplexityError.message);

// Clean up
if (streamingInterval) {
clearInterval(streamingInterval);
streamingInterval = null;
}
cleanupSigintHandler();
console.log(chalk.yellow('Falling back to Claude for complexity analysis...'));
console.log(chalk.gray('Perplexity error:'), perplexityError.message);

// Continue to Claude as fallback
console.log(chalk.yellow('\nFalling back to Claude after Perplexity error: ' + perplexityError.message));
await useClaudeForComplexityAnalysis();
}
} else {
@@ -2452,13 +2278,8 @@ DO NOT include any text before or after the JSON array. No explanations, no mark

// Helper function to use Claude for complexity analysis
async function useClaudeForComplexityAnalysis() {
// Register SIGINT handler to allow cancellation with Control-C
registerSigintHandler();

// Call the LLM API with streaming
// Add try-catch for better error handling specifically for API call
try {
const stream = await anthropic.messages.create({
const stream = await anthropic.messages.create({
max_tokens: CONFIG.maxTokens,
model: modelOverride || CONFIG.model,
temperature: CONFIG.temperature,
@@ -2467,54 +2288,13 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
stream: true
});

// Stop the default loading indicator before showing our custom UI
stopLoadingIndicator(loadingIndicator);

// Start tracking elapsed time and update information display
const startTime = Date.now();
const totalTaskCount = tasksData.tasks.length;

// Set up the progress data
const progressData = {
model: modelOverride || CONFIG.model,
contextTokens: 0, // Will estimate based on prompt size
elapsed: 0,
temperature: CONFIG.temperature,
tasksAnalyzed: 0,
totalTasks: totalTaskCount,
percentComplete: 0,
maxTokens: CONFIG.maxTokens
};

// Estimate context tokens (rough approximation - 1 token ~= 4 chars)
const estimatedContextTokens = Math.ceil(prompt.length / 4);
progressData.contextTokens = estimatedContextTokens;

// Display initial progress before streaming begins
displayAnalysisProgress(progressData);

// Update progress display at regular intervals
// Update loading indicator to show streaming progress
let dotCount = 0;
streamingInterval = setInterval(() => {
// Update elapsed time
progressData.elapsed = (Date.now() - startTime) / 1000;

// Estimate completion percentage based on response length
if (fullResponse.length > 0) {
// Estimate based on expected response size (approx. 500 chars per task)
const expectedResponseSize = totalTaskCount * 500;
const estimatedProgress = Math.min(95, (fullResponse.length / expectedResponseSize) * 100);
progressData.percentComplete = estimatedProgress;

// Estimate analyzed tasks based on JSON objects found
const taskMatches = fullResponse.match(/"taskId"\s*:\s*\d+/g);
if (taskMatches) {
progressData.tasksAnalyzed = Math.min(totalTaskCount, taskMatches.length);
}
}

// Display the progress information
displayAnalysisProgress(progressData);
}, 100); // Update much more frequently for smoother animation
readline.cursorTo(process.stdout, 0);
process.stdout.write(`Receiving streaming response from Claude${'.'.repeat(dotCount)}`);
dotCount = (dotCount + 1) % 4;
}, 500);

// Process the stream
for await (const chunk of stream) {
@@ -2523,48 +2303,14 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
}
}

// Clean up the interval - stop updating progress
if (streamingInterval) {
clearInterval(streamingInterval);
streamingInterval = null;
}
clearInterval(streamingInterval);
stopLoadingIndicator(loadingIndicator);

// Show completion message immediately
progressData.percentComplete = 100;
progressData.elapsed = (Date.now() - startTime) / 1000;
progressData.tasksAnalyzed = progressData.totalTasks;
progressData.completed = true;
progressData.contextTokens = Math.max(progressData.contextTokens, estimatedContextTokens); // Ensure the final token count is accurate
displayAnalysisProgress(progressData);

// Clear the line completely to remove any artifacts (after showing completion)
process.stdout.write('\r\x1B[K'); // Clear current line
process.stdout.write('\r'); // Move cursor to beginning of line
} catch (apiError) {
// Handle specific API errors here
if (streamingInterval) clearInterval(streamingInterval);
process.stdout.write('\r\x1B[K'); // Clear current line

console.error(chalk.red(`\nAPI Error: ${apiError.message || 'Unknown error'}\n`));
console.log(chalk.yellow('This might be a temporary issue with the Claude API.'));
console.log(chalk.yellow('Please try again in a few moments or check your API key.'));

// Rethrow to be caught by outer handler
throw apiError;
} finally {
// Clean up SIGINT handler
cleanupSigintHandler();

// Ensure the interval is cleared
if (streamingInterval) {
clearInterval(streamingInterval);
streamingInterval = null;
}
}
console.log(chalk.green("Completed streaming response from Claude API!"));
}

// Parse the JSON response
console.log(chalk.blue(` Parsing complexity analysis...`));
console.log(chalk.blue(`Parsing complexity analysis...`));
let complexityAnalysis;
try {
// Clean up the response to ensure it's valid JSON
@@ -2574,14 +2320,14 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
const codeBlockMatch = fullResponse.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
if (codeBlockMatch) {
cleanedResponse = codeBlockMatch[1];
console.debug(chalk.blue("Extracted JSON from code block"));
console.log(chalk.blue("Extracted JSON from code block"));
} else {
// Look for a complete JSON array pattern
// This regex looks for an array of objects starting with [ and ending with ]
const jsonArrayMatch = fullResponse.match(/(\[\s*\{\s*"[^"]*"\s*:[\s\S]*\}\s*\])/);
if (jsonArrayMatch) {
cleanedResponse = jsonArrayMatch[1];
console.log(chalk.blue(" Extracted JSON array pattern"));
console.log(chalk.blue("Extracted JSON array pattern"));
} else {
// Try to find the start of a JSON array and capture to the end
const jsonStartMatch = fullResponse.match(/(\[\s*\{[\s\S]*)/);
@@ -2596,19 +2342,19 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
}
}
}

// Log the cleaned response for debugging
console.debug(chalk.gray("Attempting to parse cleaned JSON..."));
console.debug(chalk.gray("Cleaned response (first 100 chars):"));
console.debug(chalk.gray(cleanedResponse.substring(0, 100)));
console.debug(chalk.gray("Last 100 chars:"));
console.debug(chalk.gray(cleanedResponse.substring(cleanedResponse.length - 100)));
console.log(chalk.gray("Attempting to parse cleaned JSON..."));
console.log(chalk.gray("Cleaned response (first 100 chars):"));
console.log(chalk.gray(cleanedResponse.substring(0, 100)));
console.log(chalk.gray("Last 100 chars:"));
console.log(chalk.gray(cleanedResponse.substring(cleanedResponse.length - 100)));

// More aggressive cleaning - strip any non-JSON content at the beginning or end
const strictArrayMatch = cleanedResponse.match(/(\[\s*\{[\s\S]*\}\s*\])/);
if (strictArrayMatch) {
cleanedResponse = strictArrayMatch[1];
console.debug(chalk.blue("Applied strict JSON array extraction"));
console.log(chalk.blue("Applied strict JSON array extraction"));
}

try {
@@ -2724,45 +2470,6 @@ DO NOT include any text before or after the JSON array. No explanations, no mark

// Use the same AI model as the original analysis
if (useResearch) {
// Register SIGINT handler again to make sure it's active for this phase
registerSigintHandler();

// Start tracking elapsed time for missing tasks
const missingTasksStartTime = Date.now();

// Stop the loading indicator before showing progress
stopLoadingIndicator(missingTasksLoadingIndicator);

// Set up progress tracking for missing tasks
const missingProgressData = {
model: process.env.PERPLEXITY_MODEL || 'sonar-pro',
contextTokens: 0,
elapsed: 0,
temperature: CONFIG.temperature,
tasksAnalyzed: 0,
totalTasks: missingTaskIds.length,
percentComplete: 0,
maxTokens: CONFIG.maxTokens
};

// Estimate context tokens
const estimatedMissingContextTokens = Math.ceil(missingTasksPrompt.length / 4);
missingProgressData.contextTokens = estimatedMissingContextTokens;

// Display initial progress
displayAnalysisProgress(missingProgressData);

// Update progress display regularly
const missingTasksInterval = setInterval(() => {
missingProgressData.elapsed = (Date.now() - missingTasksStartTime) / 1000;
missingProgressData.percentComplete = Math.min(90, (missingProgressData.elapsed / 20) * 100); // Estimate ~20s completion

// Estimate number of tasks analyzed based on percentage
missingProgressData.tasksAnalyzed = Math.floor((missingProgressData.percentComplete / 100) * missingTaskIds.length);

displayAnalysisProgress(missingProgressData);
}, 100);

// Create the same research prompt but for missing tasks
const missingTasksResearchPrompt = `You are conducting a detailed analysis of software development tasks to determine their complexity and how they should be broken down into subtasks.
@@ -2787,45 +2494,24 @@ Your response must be a clean JSON array only, following exactly this format:

DO NOT include any text before or after the JSON array. No explanations, no markdown formatting.`;

try {
const result = await perplexity.chat.completions.create({
model: process.env.PERPLEXITY_MODEL || 'sonar-pro',
messages: [
{
role: "system",
content: "You are a technical analysis AI that only responds with clean, valid JSON. Never include explanatory text or markdown formatting in your response."
},
{
role: "user",
content: missingTasksResearchPrompt
}
],
temperature: CONFIG.temperature,
max_tokens: CONFIG.maxTokens,
});

// Extract the response
missingAnalysisResponse = result.choices[0].message.content;

// Stop interval and show completion
clearInterval(missingTasksInterval);
missingProgressData.percentComplete = 100;
missingProgressData.tasksAnalyzed = missingProgressData.totalTasks;
missingProgressData.completed = true;
displayAnalysisProgress(missingProgressData);
} catch (error) {
// Clean up on error
if (missingTasksInterval) {
clearInterval(missingTasksInterval);
}
throw error;
} finally {
// Always clean up SIGINT handler and interval
cleanupSigintHandler();
if (missingTasksInterval) {
clearInterval(missingTasksInterval);
}
}
const result = await perplexity.chat.completions.create({
model: process.env.PERPLEXITY_MODEL || 'sonar-pro',
messages: [
{
role: "system",
content: "You are a technical analysis AI that only responds with clean, valid JSON. Never include explanatory text or markdown formatting in your response."
},
{
role: "user",
content: missingTasksResearchPrompt
}
],
temperature: CONFIG.temperature,
max_tokens: CONFIG.maxTokens,
});

// Extract the response
missingAnalysisResponse = result.choices[0].message.content;
} else {
// Use Claude
const stream = await anthropic.messages.create({
@@ -2861,7 +2547,7 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
const codeBlockMatch = missingAnalysisResponse.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
if (codeBlockMatch) {
cleanedResponse = codeBlockMatch[1];
console.debug(chalk.blue("Extracted JSON from code block for missing tasks"));
console.log(chalk.blue("Extracted JSON from code block for missing tasks"));
} else {
// Look for a complete JSON array pattern
const jsonArrayMatch = missingAnalysisResponse.match(/(\[\s*\{\s*"[^"]*"\s*:[\s\S]*\}\s*\])/);
@@ -2983,10 +2669,10 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
};

// Write the report to file
console.log(chalk.blue(` Writing complexity report to ${outputPath}...`));
console.log(chalk.blue(`Writing complexity report to ${outputPath}...`));
writeJSON(outputPath, report);

console.log(chalk.green(` Task complexity analysis complete. Report written to ${outputPath}`));
console.log(chalk.green(`Task complexity analysis complete. Report written to ${outputPath}`));

// Display a summary of findings
const highComplexity = complexityAnalysis.filter(t => t.complexityScore >= 8).length;
@@ -2994,43 +2680,24 @@ DO NOT include any text before or after the JSON array. No explanations, no mark
const lowComplexity = complexityAnalysis.filter(t => t.complexityScore < 5).length;
const totalAnalyzed = complexityAnalysis.length;

// Only show summary if we didn't encounter an API error
if (!apiError) {
// Create a summary object for formatting
const summary = {
totalTasks: tasksData.tasks.length,
analyzedTasks: totalAnalyzed,
highComplexityCount: highComplexity,
mediumComplexityCount: mediumComplexity,
lowComplexityCount: lowComplexity,
researchBacked: useResearch
};

// Use the new formatting function from UI module
console.log(formatComplexitySummary(summary));
}
console.log('\nComplexity Analysis Summary:');
console.log('----------------------------');
console.log(`Tasks in input file: ${tasksData.tasks.length}`);
console.log(`Tasks successfully analyzed: ${totalAnalyzed}`);
console.log(`High complexity tasks: ${highComplexity}`);
console.log(`Medium complexity tasks: ${mediumComplexity}`);
console.log(`Low complexity tasks: ${lowComplexity}`);
console.log(`Sum verification: ${highComplexity + mediumComplexity + lowComplexity} (should equal ${totalAnalyzed})`);
console.log(`Research-backed analysis: ${useResearch ? 'Yes' : 'No'}`);
console.log(`\nSee ${outputPath} for the full report and expansion commands.`);

} catch (error) {
if (streamingInterval) clearInterval(streamingInterval);
stopLoadingIndicator(loadingIndicator);

// Mark that we encountered an API error
apiError = true;

// Display a user-friendly error message
console.error(chalk.red(`\nAPI Error: ${error.message || 'Unknown error'}\n`));
console.log(chalk.yellow('This might be a temporary issue with the Claude API.'));
console.log(chalk.yellow('Please try again in a few moments.'));
cleanupSigintHandler();

// We'll continue with any tasks we might have analyzed before the error
throw error;
}
} catch (error) {
console.error(chalk.red(`Error analyzing task complexity: ${error.message}`));

// Clean up SIGINT handler
cleanupSigintHandler();

process.exit(1);
}
}
@@ -1061,255 +1061,6 @@ async function displayComplexityReport(reportPath) {
));
}

/**
 * Display real-time analysis progress with detailed information in a single line format
 * @param {Object} progressData - Object containing progress information
 * @param {string} progressData.model - Model name (e.g., 'claude-3-7-sonnet-20250219')
 * @param {number} progressData.contextTokens - Context tokens used
 * @param {number} progressData.elapsed - Elapsed time in seconds
 * @param {number} progressData.temperature - Temperature setting
 * @param {number} progressData.tasksAnalyzed - Number of tasks analyzed so far
 * @param {number} progressData.totalTasks - Total number of tasks to analyze
 * @param {number} progressData.percentComplete - Percentage complete (0-100)
 * @param {number} progressData.maxTokens - Maximum tokens setting
 * @param {boolean} progressData.completed - Whether the process is completed
 * @returns {void}
 */
function displayAnalysisProgress(progressData) {
const {
model,
contextTokens = 0,
elapsed = 0,
temperature = 0.7,
tasksAnalyzed = 0,
totalTasks = 0,
percentComplete = 0,
maxTokens = 0,
completed = false
} = progressData;

// Format the elapsed time
const timeDisplay = formatElapsedTime(elapsed);

// Use static variables to track display state
if (displayAnalysisProgress.initialized === undefined) {
displayAnalysisProgress.initialized = false;
displayAnalysisProgress.lastUpdate = Date.now();
displayAnalysisProgress.statusLineStarted = false;
}

// Create progress bar (20 characters wide)
const progressBarWidth = 20;
const percentText = `${Math.round(percentComplete)}%`;
const percentTextLength = percentText.length;

// Calculate expected total tokens and current progress
const totalTokens = contextTokens; // Use the actual token count as the total

// Calculate current tokens based on percentage complete to show gradual increase from 0 to totalTokens
const currentTokens = completed ? totalTokens : Math.min(totalTokens, Math.round((percentComplete / 100) * totalTokens));

// Format token counts with proper padding
const totalTokenDigits = totalTokens.toString().length;
const currentTokensFormatted = currentTokens.toString().padStart(totalTokenDigits, '0');
const tokenDisplay = `${currentTokensFormatted}/${totalTokens}`;

// Calculate position for centered percentage
const halfBarWidth = Math.floor(progressBarWidth / 2);
const percentStartPos = Math.max(0, halfBarWidth - Math.floor(percentTextLength / 2));

// Calculate how many filled and empty chars to draw
const filledChars = Math.floor((percentComplete / 100) * progressBarWidth);

// Create the progress bar with centered percentage (without gradient)
let progressBar = '';
for (let i = 0; i < progressBarWidth; i++) {
// If we're at the start position for the percentage text
if (i === percentStartPos) {
// Apply bold white for percentage text to stand out
progressBar += chalk.bold.white(percentText);
// Skip ahead by the length of the percentage text
i += percentTextLength - 1;
} else if (i < filledChars) {
// Use a single color instead of gradient
progressBar += chalk.cyan('█');
} else {
// Use a subtle character for empty space
progressBar += chalk.gray('░');
}
}

// Use spinner from ora - these are the actual frames used in the default spinner
const spinnerFrames = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'];

// Increment the counter faster to speed up the animation
if (!displayAnalysisProgress.frameCounter) {
displayAnalysisProgress.frameCounter = 0;
}
if (!displayAnalysisProgress.updateToggle) {
displayAnalysisProgress.updateToggle = false;
}

// Toggle between updating and not updating to halve the speed
displayAnalysisProgress.updateToggle = !displayAnalysisProgress.updateToggle;

// Only update every other call to make animation half as fast
if (displayAnalysisProgress.updateToggle) {
displayAnalysisProgress.frameCounter = (displayAnalysisProgress.frameCounter + 1) % spinnerFrames.length;
}

const spinner = chalk.cyan(spinnerFrames[displayAnalysisProgress.frameCounter]);

// Format status line based on whether we're complete or not
let statusLine;

if (completed) {
// For completed progress, show checkmark and "Complete" text
statusLine =
` ${chalk.cyan('⏱')} ${timeDisplay} ${chalk.gray('|')} ` +
`Tasks: ${chalk.bold(tasksAnalyzed)}/${totalTasks} ${chalk.gray('|')} ` +
`Tokens: ${tokenDisplay} ${chalk.gray('|')} ` +
`${progressBar} ${chalk.gray('|')} ` +
`${chalk.green('✅')} ${chalk.green('Complete')}`;
} else {
// For in-progress, show spinner and "Processing" text
statusLine =
` ${chalk.cyan('⏱')} ${timeDisplay} ${chalk.gray('|')} ` +
`Tasks: ${chalk.bold(tasksAnalyzed)}/${totalTasks} ${chalk.gray('|')} ` +
`Tokens: ${tokenDisplay} ${chalk.gray('|')} ` +
`${progressBar} ${chalk.gray('|')} ` +
`${chalk.cyan('Processing')} ${spinner}`;
}

// Clear the line and update the status
process.stdout.write('\r\x1B[K');
process.stdout.write(statusLine);

// Additional handling for completion
if (completed) {
// Move to next line and print completion message in a box
process.stdout.write('\n\n');

console.log(boxen(
chalk.green(`Task complexity analysis completed in ${timeDisplay}`) + '\n' +
chalk.green(`✅ Analyzed ${tasksAnalyzed} tasks successfully.`),
{
padding: { top: 1, bottom: 1, left: 2, right: 2 },
margin: { top: 0, bottom: 1 },
borderColor: 'green',
borderStyle: 'round'
}
));

// Reset initialization state for next run
displayAnalysisProgress.initialized = undefined;
displayAnalysisProgress.statusLineStarted = false;
}
}

/**
 * Format elapsed time in the format shown in the screenshot (0m 00s)
 * @param {number} seconds - Elapsed time in seconds
 * @returns {string} Formatted time string
 */
function formatElapsedTime(seconds) {
const minutes = Math.floor(seconds / 60);
const remainingSeconds = Math.floor(seconds % 60);
return `${minutes}m ${remainingSeconds.toString().padStart(2, '0')}s`;
}

/**
 * Format a complexity summary from analyze-complexity with a neat boxed display
 * @param {Object} summary The complexity analysis summary
 * @returns {string} The formatted summary
 */
function formatComplexitySummary(summary) {
// Calculate verification sum
const sumTotal = summary.highComplexityCount + summary.mediumComplexityCount + summary.lowComplexityCount;
const verificationStatus = sumTotal === summary.analyzedTasks ? chalk.green('✅') : chalk.red('✗');

// Create a table for better alignment
const table = new Table({
chars: {
'top': '', 'top-mid': '', 'top-left': '', 'top-right': '',
'bottom': '', 'bottom-mid': '', 'bottom-left': '', 'bottom-right': '',
'left': '', 'left-mid': '', 'mid': '', 'mid-mid': '',
'right': '', 'right-mid': '', 'middle': ' '
},
style: { border: [], 'padding-left': 2 },
colWidths: [28, 50]
});

// Basic info
table.push(
[chalk.cyan('Tasks in input file:'), chalk.bold(summary.totalTasks)],
[chalk.cyan('Tasks analyzed:'), chalk.bold(summary.analyzedTasks)]
);

// Complexity distribution in one row
const percentHigh = Math.round((summary.highComplexityCount / summary.analyzedTasks) * 100);
const percentMed = Math.round((summary.mediumComplexityCount / summary.analyzedTasks) * 100);
const percentLow = Math.round((summary.lowComplexityCount / summary.analyzedTasks) * 100);

const complexityRow = [
chalk.cyan('Complexity distribution:'),
`${chalk.hex('#CC0000').bold(summary.highComplexityCount)} ${chalk.hex('#CC0000')('High')} (${percentHigh}%) · ` +
`${chalk.hex('#FF8800').bold(summary.mediumComplexityCount)} ${chalk.hex('#FF8800')('Medium')} (${percentMed}%) · ` +
`${chalk.yellow.bold(summary.lowComplexityCount)} ${chalk.yellow('Low')} (${percentLow}%)`
];
table.push(complexityRow);

// Visual bar representation of complexity distribution
const barWidth = 40; // Total width of the bar

// Only show bars for categories with at least 1 task
const highChars = summary.highComplexityCount > 0 ?
Math.max(1, Math.round((summary.highComplexityCount / summary.analyzedTasks) * barWidth)) : 0;

const medChars = summary.mediumComplexityCount > 0 ?
Math.max(1, Math.round((summary.mediumComplexityCount / summary.analyzedTasks) * barWidth)) : 0;

const lowChars = summary.lowComplexityCount > 0 ?
Math.max(1, Math.round((summary.lowComplexityCount / summary.analyzedTasks) * barWidth)) : 0;

// Adjust bar width if some categories have 0 tasks
const actualBarWidth = highChars + medChars + lowChars;

const distributionBar =
chalk.hex('#CC0000')('█'.repeat(highChars)) +
chalk.hex('#FF8800')('█'.repeat(medChars)) +
chalk.yellow('█'.repeat(lowChars)) +
// Add empty space if actual bar is shorter than expected
(actualBarWidth < barWidth ? chalk.gray('░'.repeat(barWidth - actualBarWidth)) : '');

table.push([chalk.cyan('Distribution:'), distributionBar]);

// Add verification and research status
table.push(
[chalk.cyan('Verification:'), `${verificationStatus} ${sumTotal}/${summary.analyzedTasks}`],
[chalk.cyan('Research-backed:'), summary.researchBacked ? chalk.green('✅') : 'No']
);

// Final string output with title and footer
const output = [
chalk.bold.underline('Complexity Analysis Summary'),
'',
table.toString(),
'',
`Report saved to: ${chalk.italic('scripts/task-complexity-report.json')}`
].join('\n');

// Return a boxed version
return boxen(output, {
padding: { top: 1, right: 1, bottom: 1, left: 1 },
borderColor: 'blue',
borderStyle: 'round',
margin: { top: 1, right: 1, bottom: 1, left: 0 }
});
}

/**
 * Confirm overwriting existing tasks.json file
 * @param {string} tasksPath - Path to the tasks.json file
@@ -1337,38 +1088,6 @@ async function confirmTaskOverwrite(tasksPath) {
return answer.toLowerCase() === 'y' || answer.toLowerCase() === 'yes';
}

/**
 * Display the start of complexity analysis with a boxen announcement
 * @param {string} tasksPath - Path to the tasks file being analyzed
 * @param {string} outputPath - Path where the report will be saved
 * @param {boolean} useResearch - Whether Perplexity AI research is enabled
 * @param {string} model - AI model name
 * @param {number} temperature - AI temperature setting
 */
function displayComplexityAnalysisStart(tasksPath, outputPath, useResearch = false, model = CONFIG.model, temperature = CONFIG.temperature) {
// Create the message content with all information
let message = chalk.bold(`🤖 Analyzing Task Complexity`) + '\n' +
chalk.dim(`Model: ${model} | Temperature: ${temperature}`) + '\n\n' +
chalk.blue(`Input: ${tasksPath}`) + '\n' +
chalk.blue(`Output: ${outputPath}`);

// Add research info if enabled
if (useResearch) {
message += '\n' + chalk.blue('Using Perplexity AI for research-backed analysis');
}

// Display everything in a single boxen
console.log(boxen(
message,
{
padding: { top: 1, bottom: 1, left: 2, right: 2 },
margin: { top: 0, bottom: 0 },
borderColor: 'blue',
borderStyle: 'round'
}
));
}

// Export UI functions
export {
displayBanner,
@@ -1381,9 +1100,6 @@ export {
getComplexityWithColor,
displayNextTask,
displayTaskById,
displayComplexityAnalysisStart,
displayComplexityReport,
displayAnalysisProgress,
formatComplexitySummary,
confirmTaskOverwrite
};
tasks/task_040.txt (new file, 102 lines)

@@ -0,0 +1,102 @@
# Task ID: 40
# Title: Implement Project Funding Documentation and Support Infrastructure
# Status: in-progress
# Dependencies: None
# Priority: medium
# Description: Create FUNDING.yml for GitHub Sponsors integration that outlines all financial support options for the Task Master project.
# Details:
This task involves creating a FUNDING.yml file to enable and manage funding options for the Task Master project:

**FUNDING.yml file**:
- Create a .github/FUNDING.yml file following GitHub's specifications
- Include configuration for multiple funding platforms:
  - GitHub Sponsors (primary if available)
  - Open Collective
  - Patreon
  - Ko-fi
  - Liberapay
  - Custom funding URLs (project website donation page)
- Research and reference successful implementation patterns from Vue.js, React, and TypeScript projects
- Ensure the FUNDING.yml contains sufficient information to guide users on how to support the project
- Include comments within the YAML file to provide context for each funding option

The implementation should maintain consistent branding and messaging with the rest of the Task Master project. Research at least 5 successful open source projects to identify best practices in funding configuration.

# Test Strategy:
Testing should verify the technical implementation of the FUNDING.yml file:

1. **FUNDING.yml validation**:
  - Verify the file is correctly placed in the .github directory
  - Validate YAML syntax using a linter
  - Test that GitHub correctly displays funding options on the repository page
  - Verify all links to external funding platforms are functional

2. **User experience testing**:
  - Test the complete funding workflow from a potential supporter's perspective
  - Verify the process is intuitive and barriers to contribution are minimized
  - Check that the Sponsor button appears correctly on GitHub
  - Ensure all funding platform links resolve to the correct destinations
  - Gather feedback from 2-3 potential users on clarity and ease of use

# Subtasks:
## 1. Research and Create FUNDING.yml File [done]
### Dependencies: None
### Description: Research successful funding configurations and create the .github/FUNDING.yml file for GitHub Sponsors integration and other funding platforms.
### Details:
Implementation steps:
1. Create the .github directory at the project root if it doesn't exist
2. Research funding configurations from 5 successful open source projects (Vue.js, React, TypeScript, etc.)
3. Document the patterns and approaches used in these projects
4. Create the FUNDING.yml file with the following platforms:
  - GitHub Sponsors (primary)
  - Open Collective
  - Patreon
  - Ko-fi
  - Liberapay
  - Custom donation URL for the project website
5. Validate the YAML syntax using a linter
6. Test the file by pushing to a test branch and verifying the Sponsor button appears correctly on GitHub

Testing approach:
- Validate YAML syntax using yamllint or similar tool
- Test on GitHub by checking if the Sponsor button appears in the repository
- Verify each funding link resolves to the correct destination
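Subtask 40.1 above lists the platforms to configure. A minimal sketch of what the resulting `.github/FUNDING.yml` could look like is shown below; every account name and URL is a placeholder, since the project's real funding accounts are not part of this diff.

```yaml
# Hypothetical sketch of .github/FUNDING.yml -- all account names and URLs
# below are placeholders, not the project's actual funding configuration.
github: [your-github-username]            # GitHub Sponsors (primary)
open_collective: your-collective-slug     # Open Collective
patreon: your-patreon-name                # Patreon
ko_fi: your-kofi-name                     # Ko-fi
liberapay: your-liberapay-name            # Liberapay
custom: ["https://example.com/donate"]    # project website donation page
```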
## 4. Add Documentation Comments to FUNDING.yml [pending]
### Dependencies: 40.1
### Description: Add comprehensive comments within the FUNDING.yml file to provide context and guidance for each funding option.
### Details:
Implementation steps:
1. Add a header comment explaining the purpose of the file
2. For each funding platform entry, add comments that explain:
  - What the platform is
  - How funds are processed on this platform
  - Any specific benefits of using this platform
  - Brief instructions for potential sponsors
3. Include a comment about how sponsors will be acknowledged
4. Add information about fund allocation (maintenance, new features, infrastructure)
5. Ensure comments follow YAML comment syntax and don't break the file structure

Testing approach:
- Validate that the YAML file still passes linting with comments added
- Verify the file still functions correctly on GitHub
- Have at least one team member review the comments for clarity and completeness

## 5. Integrate Funding Information in Project README [pending]
### Dependencies: 40.1, 40.4
### Description: Add a section to the project README that highlights the funding options and directs users to the Sponsor button.
### Details:
Implementation steps:
1. Create a 'Support the Project' or 'Sponsorship' section in the README.md
2. Explain briefly why financial support matters for the project
3. Direct users to the GitHub Sponsor button
4. Mention the alternative funding platforms available
5. Include a brief note on how funds will be used
6. Add any relevant funding badges (e.g., Open Collective, GitHub Sponsors)

Testing approach:
- Review the README section for clarity and conciseness
- Verify all links work correctly
- Ensure the section is appropriately visible but doesn't overshadow project information
- Check that badges render correctly
tasks/task_041.txt (new file, 89 lines)

@@ -0,0 +1,89 @@
# Task ID: 41
# Title: Implement GitHub Actions CI Workflow for Task Master
# Status: pending
# Dependencies: None
# Priority: high
# Description: Create a streamlined CI workflow file (ci.yml) that efficiently tests the Task Master codebase using GitHub Actions.
# Details:
Create a GitHub Actions workflow file at `.github/workflows/ci.yml` with the following specifications:

1. Configure the workflow to trigger on:
  - Push events to any branch
  - Pull request events targeting any branch

2. Core workflow configuration:
  - Use Ubuntu latest as the primary testing environment
  - Use Node.js 20.x (LTS) for consistency with the project
  - Focus on single environment for speed and simplicity

3. Configure workflow steps to:
  - Checkout the repository using actions/checkout@v4
  - Set up Node.js using actions/setup-node@v4 with npm caching
  - Install dependencies with 'npm ci'
  - Run tests with 'npm run test:coverage'

4. Implement efficient caching:
  - Cache node_modules using actions/cache@v4
  - Use package-lock.json hash for cache key
  - Implement proper cache restoration keys

5. Ensure proper timeouts:
  - 2 minutes for dependency installation
  - Appropriate timeout for test execution

6. Artifact handling:
  - Upload test results and coverage reports
  - Use consistent naming for artifacts
  - Retain artifacts for 30 days

# Test Strategy:
To verify correct implementation of the GitHub Actions CI workflow:

1. Manual verification:
  - Check that the file is correctly placed at `.github/workflows/ci.yml`
  - Verify the YAML syntax is valid
  - Confirm all required configurations are present

2. Functional testing:
  - Push a commit to verify the workflow triggers
  - Create a PR to verify the workflow runs on pull requests
  - Verify test coverage reports are generated and uploaded
  - Confirm caching is working effectively

3. Performance testing:
  - Verify cache hits reduce installation time
  - Confirm workflow completes within expected timeframe
  - Check artifact upload and download speeds

# Subtasks:
## 1. Create Basic GitHub Actions Workflow [pending]
### Dependencies: None
### Description: Set up the foundational GitHub Actions workflow file with proper triggers and Node.js setup
### Details:
1. Create `.github/workflows/ci.yml`
2. Configure workflow name and triggers
3. Set up Ubuntu runner and Node.js 20.x
4. Implement checkout and Node.js setup actions
5. Configure npm caching
6. Test basic workflow functionality

## 2. Implement Test and Coverage Steps [pending]
### Dependencies: 41.1
### Description: Add test execution and coverage reporting to the workflow
### Details:
1. Add dependency installation with proper timeout
2. Configure test execution with coverage
3. Set up test results and coverage artifacts
4. Verify artifact upload functionality
5. Test the complete workflow

## 3. Optimize Workflow Performance [pending]
### Dependencies: 41.1, 41.2
### Description: Implement caching and performance optimizations
### Details:
1. Set up node_modules caching
2. Configure cache key strategy
3. Implement proper timeout values
4. Test caching effectiveness
5. Document performance improvements
tasks/tasks.json

@@ -2208,6 +2208,94 @@
"parentTaskId": 39
}
]
},
{
"id": 40,
"title": "Implement Project Funding Documentation and Support Infrastructure",
"description": "Create FUNDING.yml for GitHub Sponsors integration that outlines all financial support options for the Task Master project.",
"status": "in-progress",
"dependencies": [],
"priority": "medium",
"details": "This task involves creating a FUNDING.yml file to enable and manage funding options for the Task Master project:\n\n**FUNDING.yml file**:\n - Create a .github/FUNDING.yml file following GitHub's specifications\n - Include configuration for multiple funding platforms:\n - GitHub Sponsors (primary if available)\n - Open Collective\n - Patreon\n - Ko-fi\n - Liberapay\n - Custom funding URLs (project website donation page)\n - Research and reference successful implementation patterns from Vue.js, React, and TypeScript projects\n - Ensure the FUNDING.yml contains sufficient information to guide users on how to support the project\n - Include comments within the YAML file to provide context for each funding option\n\nThe implementation should maintain consistent branding and messaging with the rest of the Task Master project. Research at least 5 successful open source projects to identify best practices in funding configuration.",
"testStrategy": "Testing should verify the technical implementation of the FUNDING.yml file:\n\n1. **FUNDING.yml validation**:\n - Verify the file is correctly placed in the .github directory\n - Validate YAML syntax using a linter\n - Test that GitHub correctly displays funding options on the repository page\n - Verify all links to external funding platforms are functional\n\n2. **User experience testing**:\n - Test the complete funding workflow from a potential supporter's perspective\n - Verify the process is intuitive and barriers to contribution are minimized\n - Check that the Sponsor button appears correctly on GitHub\n - Ensure all funding platform links resolve to the correct destinations\n - Gather feedback from 2-3 potential users on clarity and ease of use",
"subtasks": [
{
"id": 1,
"title": "Research and Create FUNDING.yml File",
"description": "Research successful funding configurations and create the .github/FUNDING.yml file for GitHub Sponsors integration and other funding platforms.",
"dependencies": [],
"details": "Implementation steps:\n1. Create the .github directory at the project root if it doesn't exist\n2. Research funding configurations from 5 successful open source projects (Vue.js, React, TypeScript, etc.)\n3. Document the patterns and approaches used in these projects\n4. Create the FUNDING.yml file with the following platforms:\n - GitHub Sponsors (primary)\n - Open Collective\n - Patreon\n - Ko-fi\n - Liberapay\n - Custom donation URL for the project website\n5. Validate the YAML syntax using a linter\n6. Test the file by pushing to a test branch and verifying the Sponsor button appears correctly on GitHub\n\nTesting approach:\n- Validate YAML syntax using yamllint or similar tool\n- Test on GitHub by checking if the Sponsor button appears in the repository\n- Verify each funding link resolves to the correct destination",
"status": "done",
"parentTaskId": 40
},
{
"id": 4,
"title": "Add Documentation Comments to FUNDING.yml",
"description": "Add comprehensive comments within the FUNDING.yml file to provide context and guidance for each funding option.",
"dependencies": [
1
],
"details": "Implementation steps:\n1. Add a header comment explaining the purpose of the file\n2. For each funding platform entry, add comments that explain:\n - What the platform is\n - How funds are processed on this platform\n - Any specific benefits of using this platform\n - Brief instructions for potential sponsors\n3. Include a comment about how sponsors will be acknowledged\n4. Add information about fund allocation (maintenance, new features, infrastructure)\n5. Ensure comments follow YAML comment syntax and don't break the file structure\n\nTesting approach:\n- Validate that the YAML file still passes linting with comments added\n- Verify the file still functions correctly on GitHub\n- Have at least one team member review the comments for clarity and completeness",
|
||||
"status": "pending",
|
||||
"parentTaskId": 40
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"title": "Integrate Funding Information in Project README",
|
||||
"description": "Add a section to the project README that highlights the funding options and directs users to the Sponsor button.",
|
||||
"dependencies": [
|
||||
1,
|
||||
4
|
||||
],
|
||||
"details": "Implementation steps:\n1. Create a 'Support the Project' or 'Sponsorship' section in the README.md\n2. Explain briefly why financial support matters for the project\n3. Direct users to the GitHub Sponsor button\n4. Mention the alternative funding platforms available\n5. Include a brief note on how funds will be used\n6. Add any relevant funding badges (e.g., Open Collective, GitHub Sponsors)\n\nTesting approach:\n- Review the README section for clarity and conciseness\n- Verify all links work correctly\n- Ensure the section is appropriately visible but doesn't overshadow project information\n- Check that badges render correctly",
|
||||
"status": "pending",
|
||||
"parentTaskId": 40
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 41,
|
||||
"title": "Implement GitHub Actions CI Workflow for Cross-Platform Testing",
|
||||
"description": "Create a CI workflow file (ci.yml) that tests the codebase across multiple Node.js versions and operating systems using GitHub Actions.",
|
||||
"status": "pending",
|
||||
"dependencies": [],
|
||||
"priority": "high",
|
||||
"details": "Create a GitHub Actions workflow file at `.github/workflows/ci.yml` with the following specifications:\n\n1. Configure the workflow to trigger on:\n - Push events to any branch\n - Pull request events targeting any branch\n\n2. Implement a matrix strategy that tests across:\n - Node.js versions: 18.x, 20.x, and 22.x\n - Operating systems: Ubuntu-latest and Windows-latest\n\n3. Include proper Git configuration steps:\n - Set Git user name to 'GitHub Actions'\n - Set Git email to 'github-actions@github.com'\n\n4. Configure workflow steps to:\n - Checkout the repository using actions/checkout@v3\n - Set up Node.js using actions/setup-node@v3 with the matrix version\n - Use npm for package management (not pnpm)\n - Install dependencies with 'npm ci'\n - Run linting with 'npm run lint' (if available)\n - Run tests with 'npm test'\n - Run build process with 'npm run build'\n\n5. Implement concurrency controls to:\n - Cancel in-progress workflows when new commits are pushed to the same PR\n - Use a concurrency group based on the GitHub ref and workflow name\n\n6. Add proper caching for npm dependencies to speed up workflow runs\n\n7. Ensure the workflow includes appropriate timeouts to prevent hung jobs",
|
||||
"testStrategy": "To verify correct implementation of the GitHub Actions CI workflow:\n\n1. Manual verification:\n - Check that the file is correctly placed at `.github/workflows/ci.yml`\n - Verify the YAML syntax is valid using a YAML linter\n - Confirm all required configurations (triggers, matrix, steps) are present\n\n2. Functional testing:\n - Push a commit to a feature branch to confirm the workflow triggers\n - Create a PR to verify the workflow runs on pull requests\n - Verify the workflow successfully runs on both Ubuntu and Windows\n - Confirm tests run against all three Node.js versions (18, 20, 22)\n - Test concurrency by pushing multiple commits to the same PR rapidly\n\n3. Edge case testing:\n - Introduce a failing test and verify the workflow reports failure\n - Test with a large dependency tree to verify caching works correctly\n - Verify the workflow handles non-ASCII characters in file paths correctly (particularly on Windows)\n\n4. Check workflow logs to ensure:\n - Git configuration is applied correctly\n - Dependencies are installed with npm (not pnpm)\n - All matrix combinations run independently\n - Concurrency controls cancel redundant workflow runs",
|
||||
"subtasks": [
|
||||
{
|
||||
"id": 1,
|
||||
"title": "Create Basic GitHub Actions Workflow Structure",
|
||||
"description": "Set up the foundational GitHub Actions workflow file with triggers, checkout, and Node.js setup using matrix strategy",
|
||||
"dependencies": [],
|
||||
"details": "1. Create `.github/workflows/` directory if it doesn't exist\n2. Create a new file `ci.yml` inside this directory\n3. Define the workflow name at the top of the file\n4. Configure triggers for push events to any branch and pull request events targeting any branch\n5. Set up the matrix strategy for Node.js versions (18.x, 20.x, 22.x) and operating systems (Ubuntu-latest, Windows-latest)\n6. Configure the job to checkout the repository using actions/checkout@v3\n7. Set up Node.js using actions/setup-node@v3 with the matrix version\n8. Add proper caching for npm dependencies\n9. Test the workflow by pushing the file to a test branch and verifying it triggers correctly\n10. Verify that the matrix builds are running on all specified Node versions and operating systems",
|
||||
"status": "pending",
|
||||
"parentTaskId": 41
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "Implement Build and Test Steps with Git Configuration",
|
||||
"description": "Add the core build and test steps to the workflow, including Git configuration, dependency installation, and execution of lint, test, and build commands",
|
||||
"dependencies": [
|
||||
1
|
||||
],
|
||||
"details": "1. Add Git configuration steps to set user name to 'GitHub Actions' and email to 'github-actions@github.com'\n2. Add step to install dependencies with 'npm ci'\n3. Add conditional step to run linting with 'npm run lint' if available\n4. Add step to run tests with 'npm test'\n5. Add step to run build process with 'npm run build'\n6. Ensure each step has appropriate names for clear visibility in GitHub Actions UI\n7. Add appropriate error handling and continue-on-error settings where needed\n8. Test the workflow by pushing a change and verifying all build steps execute correctly\n9. Verify that the workflow correctly runs on both Ubuntu and Windows environments\n10. Ensure that all commands use the correct syntax for cross-platform compatibility",
|
||||
"status": "pending",
|
||||
"parentTaskId": 41
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "Add Workflow Optimization Features",
|
||||
"description": "Implement concurrency controls, timeouts, and other optimization features to improve workflow efficiency and reliability",
|
||||
"dependencies": [
|
||||
1,
|
||||
2
|
||||
],
|
||||
"details": "1. Implement concurrency controls to cancel in-progress workflows when new commits are pushed to the same PR\n2. Define a concurrency group based on the GitHub ref and workflow name\n3. Add appropriate timeouts to prevent hung jobs (typically 30-60 minutes depending on project complexity)\n4. Add status badges to the README.md file to show build status\n5. Optimize the workflow by adding appropriate 'if' conditions to skip unnecessary steps\n6. Add job summary outputs to provide clear information about the build results\n7. Test the concurrency feature by pushing multiple commits in quick succession to a PR\n8. Verify that old workflow runs are canceled when new commits are pushed\n9. Test timeout functionality by temporarily adding a long-running step\n10. Document the CI workflow in project documentation, explaining what it does and how to troubleshoot common issues",
|
||||
"status": "pending",
|
||||
"parentTaskId": 41
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
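For context on task 40, a minimal .github/FUNDING.yml along the lines the task describes could look like the sketch below. The top-level keys (github, open_collective, patreon, ko_fi, liberapay, custom) are the ones GitHub documents for the Sponsor button; every account name and URL shown is a placeholder, not an actual Task Master funding account.

# .github/FUNDING.yml (sketch; all values are placeholders)
github: [your-github-username]          # GitHub Sponsors handle(s)
open_collective: your-collective-slug   # Open Collective project slug
patreon: your-patreon-name              # Patreon creator name
ko_fi: your-kofi-name                   # Ko-fi page name
liberapay: your-liberapay-name          # Liberapay account
custom: ["https://example.com/donate"]  # Up to 4 custom donation URLs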
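Similarly, the cross-platform workflow that task 41 specifies (push/PR triggers, a Node 18/20/22 by Ubuntu/Windows matrix, Git identity setup, npm-based install/lint/test/build, concurrency cancellation, and timeouts) could be sketched roughly as follows. This illustrates the task description only, not the ci.yml actually added in this branch; the --if-present flags are an assumption about which npm scripts the package defines.

# Sketch of the matrix workflow described by task 41
name: CI Matrix

on:
  push:
  pull_request:

concurrency:
  # Cancel superseded runs for the same branch or PR
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ${{ matrix.os }}
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, windows-latest]
        node-version: [18.x, 20.x, 22.x]
    steps:
      - uses: actions/checkout@v3

      - uses: actions/setup-node@v3
        with:
          node-version: ${{ matrix.node-version }}
          cache: "npm"   # caches the npm download cache keyed on package-lock.json

      - name: Configure Git identity
        run: |
          git config --global user.name "GitHub Actions"
          git config --global user.email "github-actions@github.com"

      - name: Install Dependencies
        run: npm ci

      - name: Lint
        run: npm run lint --if-present

      - name: Run Tests
        run: npm test

      - name: Build
        run: npm run build --if-present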